1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/patches/gcc/4.2.2/300-avr32-atmel-v1.1.3.patch Fri Sep 21 16:26:10 2012 +0200
1.3 @@ -0,0 +1,22620 @@
1.4 +--- a/configure.in
1.5 ++++ b/configure.in
1.6 +@@ -503,6 +503,9 @@ case "${target}" in
1.7 + arm-*-riscix*)
1.8 + noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
1.9 + ;;
1.10 ++ avr32-*-*)
1.11 ++ noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}"
1.12 ++ ;;
1.13 + avr-*-*)
1.14 + noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
1.15 + ;;
1.16 +--- a/gcc/builtins.c
1.17 ++++ b/gcc/builtins.c
1.18 +@@ -9223,7 +9223,7 @@ validate_arglist (tree arglist, ...)
1.19 +
1.20 + do
1.21 + {
1.22 +- code = va_arg (ap, enum tree_code);
1.23 ++ code = va_arg (ap, int);
1.24 + switch (code)
1.25 + {
1.26 + case 0:
1.27 +--- a/gcc/calls.c
1.28 ++++ b/gcc/calls.c
1.29 +@@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
1.30 + for (; count < nargs; count++)
1.31 + {
1.32 + rtx val = va_arg (p, rtx);
1.33 +- enum machine_mode mode = va_arg (p, enum machine_mode);
1.34 ++ enum machine_mode mode = va_arg (p, int);
1.35 +
1.36 + /* We cannot convert the arg value to the mode the library wants here;
1.37 + must do it earlier where we know the signedness of the arg. */
1.38 +--- a/gcc/c-incpath.c
1.39 ++++ b/gcc/c-incpath.c
1.40 +@@ -347,6 +347,18 @@ add_path (char *path, int chain, int cxx
1.41 + char* c;
1.42 + for (c = path; *c; c++)
1.43 + if (*c == '\\') *c = '/';
1.44 ++ /* Remove unnecessary trailing slashes. On some versions of MS
1.45 ++ Windows, trailing _forward_ slashes cause no problems for stat().
1.46 ++ On newer versions, stat() does not recognise a directory that ends
1.47 ++ in a '\\' or '/', unless it is a drive root dir, such as "c:/",
1.48 ++ where it is obligatory. */
1.49 ++ int pathlen = strlen (path);
1.50 ++ char* end = path + pathlen - 1;
1.51 ++ /* Preserve the lead '/' or lead "c:/". */
1.52 ++ char* start = path + (pathlen > 2 && path[1] == ':' ? 3 : 1);
1.53 ++
1.54 ++ for (; end > start && IS_DIR_SEPARATOR (*end); end--)
1.55 ++ *end = 0;
1.56 + #endif
1.57 +
1.58 + p = XNEW (cpp_dir);
1.59 +--- /dev/null
1.60 ++++ b/gcc/config/avr32/avr32.c
1.61 +@@ -0,0 +1,7915 @@
1.62 ++/*
1.63 ++ Target hooks and helper functions for AVR32.
1.64 ++ Copyright 2003-2006 Atmel Corporation.
1.65 ++
1.66 ++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.67 ++  Initial porting by Anders Ådland.
1.68 ++
1.69 ++ This file is part of GCC.
1.70 ++
1.71 ++ This program is free software; you can redistribute it and/or modify
1.72 ++ it under the terms of the GNU General Public License as published by
1.73 ++ the Free Software Foundation; either version 2 of the License, or
1.74 ++ (at your option) any later version.
1.75 ++
1.76 ++ This program is distributed in the hope that it will be useful,
1.77 ++ but WITHOUT ANY WARRANTY; without even the implied warranty of
1.78 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.79 ++ GNU General Public License for more details.
1.80 ++
1.81 ++ You should have received a copy of the GNU General Public License
1.82 ++ along with this program; if not, write to the Free Software
1.83 ++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
1.84 ++
1.85 ++#include "config.h"
1.86 ++#include "system.h"
1.87 ++#include "coretypes.h"
1.88 ++#include "tm.h"
1.89 ++#include "rtl.h"
1.90 ++#include "tree.h"
1.91 ++#include "obstack.h"
1.92 ++#include "regs.h"
1.93 ++#include "hard-reg-set.h"
1.94 ++#include "real.h"
1.95 ++#include "insn-config.h"
1.96 ++#include "conditions.h"
1.97 ++#include "output.h"
1.98 ++#include "insn-attr.h"
1.99 ++#include "flags.h"
1.100 ++#include "reload.h"
1.101 ++#include "function.h"
1.102 ++#include "expr.h"
1.103 ++#include "optabs.h"
1.104 ++#include "toplev.h"
1.105 ++#include "recog.h"
1.106 ++#include "ggc.h"
1.107 ++#include "except.h"
1.108 ++#include "c-pragma.h"
1.109 ++#include "integrate.h"
1.110 ++#include "tm_p.h"
1.111 ++#include "langhooks.h"
1.112 ++
1.113 ++#include "target.h"
1.114 ++#include "target-def.h"
1.115 ++
1.116 ++#include <ctype.h>
1.117 ++
1.118 ++/* Forward definitions of types. */
1.119 ++typedef struct minipool_node Mnode;
1.120 ++typedef struct minipool_fixup Mfix;
1.121 ++
1.122 ++/* Obstack for minipool constant handling. */
1.123 ++static struct obstack minipool_obstack;
1.124 ++static char *minipool_startobj;
1.125 ++static rtx minipool_vector_label;
1.126 ++
1.127 ++/* True if we are currently building a constant table. */
1.128 ++int making_const_table;
1.129 ++
1.130 ++/* Some forward function declarations */
1.131 ++static unsigned long avr32_isr_value (tree);
1.132 ++static unsigned long avr32_compute_func_type (void);
1.133 ++static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
1.134 ++static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
1.135 ++static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
1.136 ++ int flags, bool * no_add_attrs);
1.137 ++static void avr32_reorg (void);
1.138 ++bool avr32_return_in_msb (tree type);
1.139 ++bool avr32_vector_mode_supported (enum machine_mode mode);
1.140 ++static void avr32_init_libfuncs (void);
1.141 ++
1.142 ++
1.143 ++static void
1.144 ++avr32_add_gc_roots (void)
1.145 ++{
1.146 ++ gcc_obstack_init (&minipool_obstack);
1.147 ++ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1.148 ++}
1.149 ++
1.150 ++
1.151 ++/* List of all known AVR32 parts */
1.152 ++static const struct part_type_s avr32_part_types[] = {
1.153 ++ /* name, part_type, architecture type, macro */
1.154 ++ {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
1.155 ++ {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
1.156 ++ {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
1.157 ++ {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
1.158 ++ {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
1.159 ++ {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2,
1.160 ++ "__AVR32_UC3A0128__"},
1.161 ++ {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2,
1.162 ++ "__AVR32_UC3A0256__"},
1.163 ++ {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2,
1.164 ++ "__AVR32_UC3A0512__"},
1.165 ++ {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1,
1.166 ++ "__AVR32_UC3A0512ES__"},
1.167 ++ {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2,
1.168 ++ "__AVR32_UC3A1128__"},
1.169 ++ {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2,
1.170 ++ "__AVR32_UC3A1256__"},
1.171 ++ {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2,
1.172 ++ "__AVR32_UC3A1512__"},
1.173 ++ {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1,
1.174 ++ "__AVR32_UC3A1512ES__"},
1.175 ++ {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL,
1.176 ++ "__AVR32_UC3A3256S__"},
1.177 ++ {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2,
1.178 ++ "__AVR32_UC3A364__"},
1.179 ++ {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2,
1.180 ++ "__AVR32_UC3A364S__"},
1.181 ++ {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2,
1.182 ++ "__AVR32_UC3A3128__"},
1.183 ++ {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2,
1.184 ++ "__AVR32_UC3A3128S__"},
1.185 ++ {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2,
1.186 ++ "__AVR32_UC3A3256__"},
1.187 ++ {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2,
1.188 ++ "__AVR32_UC3A3256S__"},
1.189 ++ {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1,
1.190 ++ "__AVR32_UC3B064__"},
1.191 ++ {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1,
1.192 ++ "__AVR32_UC3B0128__"},
1.193 ++ {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1,
1.194 ++ "__AVR32_UC3B0256__"},
1.195 ++ {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1,
1.196 ++ "__AVR32_UC3B0256ES__"},
1.197 ++ {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1,
1.198 ++ "__AVR32_UC3B164__"},
1.199 ++ {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1,
1.200 ++ "__AVR32_UC3B1128__"},
1.201 ++ {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1,
1.202 ++ "__AVR32_UC3B1256__"},
1.203 ++ {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1,
1.204 ++ "__AVR32_UC3B1256ES__"},
1.205 ++ {NULL, 0, 0, NULL}
1.206 ++};
1.207 ++
1.208 ++/* List of all known AVR32 architectures */
1.209 ++static const struct arch_type_s avr32_arch_types[] = {
1.210 ++ /* name, architecture type, microarchitecture type, feature flags, macro */
1.211 ++ {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
1.212 ++ (FLAG_AVR32_HAS_DSP
1.213 ++ | FLAG_AVR32_HAS_SIMD
1.214 ++ | FLAG_AVR32_HAS_UNALIGNED_WORD
1.215 ++ | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
1.216 ++ | FLAG_AVR32_HAS_CACHES),
1.217 ++ "__AVR32_AP__"},
1.218 ++ {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
1.219 ++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
1.220 ++ "__AVR32_UC__=1"},
1.221 ++ {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
1.222 ++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
1.223 ++ | FLAG_AVR32_HAS_V2_INSNS),
1.224 ++ "__AVR32_UC__=2"},
1.225 ++ {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
1.226 ++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
1.227 ++ | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
1.228 ++ "__AVR32_UC__=3"},
1.229 ++ {NULL, 0, 0, 0, NULL}
1.230 ++};
1.231 ++
1.232 ++/* Default arch name */
1.233 ++const char *avr32_arch_name = "none";
1.234 ++const char *avr32_part_name = "none";
1.235 ++
1.236 ++const struct part_type_s *avr32_part;
1.237 ++const struct arch_type_s *avr32_arch;
1.238 ++
1.239 ++
1.240 ++/* Set default target_flags. */
1.241 ++#undef TARGET_DEFAULT_TARGET_FLAGS
1.242 ++#define TARGET_DEFAULT_TARGET_FLAGS \
1.243 ++ (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
1.244 ++
1.245 ++void
1.246 ++avr32_optimization_options (int level,
1.247 ++ int size){
1.248 ++ if (AVR32_ALWAYS_PIC)
1.249 ++ flag_pic = 1;
1.250 ++
1.251 ++ /* Enable section anchors if optimization is enabled. */
1.252 ++ if (level > 0 || size)
1.253 ++ flag_section_anchors = 1;
1.254 ++}
1.255 ++
1.256 ++/* Override command line options */
1.257 ++void
1.258 ++avr32_override_options (void)
1.259 ++{
1.260 ++ const struct part_type_s *part;
1.261 ++ const struct arch_type_s *arch;
1.262 ++
1.263 ++  /*Add backward compatibility*/
1.264 ++ if (strcmp ("uc", avr32_arch_name)== 0)
1.265 ++ {
1.266 ++ fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
1.267 ++ "Please use '-march=ucr1' instead. "
1.268 ++ "Converting to arch 'ucr1'\n",
1.269 ++ avr32_arch_name);
1.270 ++ avr32_arch_name="ucr1";
1.271 ++ }
1.272 ++
1.273 ++ /* Check if arch type is set. */
1.274 ++ for (arch = avr32_arch_types; arch->name; arch++)
1.275 ++ {
1.276 ++ if (strcmp (arch->name, avr32_arch_name) == 0)
1.277 ++ break;
1.278 ++ }
1.279 ++ avr32_arch = arch;
1.280 ++
1.281 ++ if (!arch->name && strcmp("none", avr32_arch_name) != 0)
1.282 ++ {
1.283 ++ fprintf (stderr, "Unknown arch `%s' specified\n"
1.284 ++ "Known arch names:\n"
1.285 ++ "\tuc (deprecated)\n",
1.286 ++ avr32_arch_name);
1.287 ++ for (arch = avr32_arch_types; arch->name; arch++)
1.288 ++ fprintf (stderr, "\t%s\n", arch->name);
1.289 ++ avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
1.290 ++ }
1.291 ++
1.292 ++ /* Check if part type is set. */
1.293 ++ for (part = avr32_part_types; part->name; part++)
1.294 ++ if (strcmp (part->name, avr32_part_name) == 0)
1.295 ++ break;
1.296 ++
1.297 ++ avr32_part = part;
1.298 ++ if (!part->name)
1.299 ++ {
1.300 ++ fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
1.301 ++ avr32_part_name);
1.302 ++ for (part = avr32_part_types; part->name; part++)
1.303 ++ {
1.304 ++ if (strcmp("none", part->name) != 0)
1.305 ++ fprintf (stderr, "\t%s\n", part->name);
1.306 ++ }
1.307 ++ /* Set default to NONE*/
1.308 ++ avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
1.309 ++ }
1.310 ++
1.311 ++ /* NB! option -march= overrides option -mpart
1.312 ++ * if both are used at the same time */
1.313 ++ if (!arch->name)
1.314 ++ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
1.315 ++
1.316 ++ /* If optimization level is two or greater, then align start of loops to a
1.317 ++ word boundary since this will allow folding the first insn of the loop.
1.318 ++ Do this only for targets supporting branch prediction. */
1.319 ++ if (optimize >= 2 && TARGET_BRANCH_PRED)
1.320 ++ align_loops = 2;
1.321 ++
1.322 ++
1.323 ++ /* Enable fast-float library if unsafe math optimizations
1.324 ++ are used. */
1.325 ++ if (flag_unsafe_math_optimizations)
1.326 ++ target_flags |= MASK_FAST_FLOAT;
1.327 ++
1.328 ++ /* Check if we should set avr32_imm_in_const_pool
1.329 ++ based on if caches are present or not. */
1.330 ++ if ( avr32_imm_in_const_pool == -1 )
1.331 ++ {
1.332 ++ if ( TARGET_CACHES )
1.333 ++ avr32_imm_in_const_pool = 1;
1.334 ++ else
1.335 ++ avr32_imm_in_const_pool = 0;
1.336 ++ }
1.337 ++
1.338 ++ if (TARGET_NO_PIC)
1.339 ++ flag_pic = 0;
1.340 ++
1.341 ++ avr32_add_gc_roots ();
1.342 ++}
1.343 ++
1.344 ++
1.345 ++/*
1.346 ++If defined, a function that outputs the assembler code for entry to a
1.347 ++function. The prologue is responsible for setting up the stack frame,
1.348 ++initializing the frame pointer register, saving registers that must be
1.349 ++saved, and allocating size additional bytes of storage for the
1.350 ++local variables. size is an integer. file is a stdio
1.351 ++stream to which the assembler code should be output.
1.352 ++
1.353 ++The label for the beginning of the function need not be output by this
1.354 ++macro. That has already been done when the macro is run.
1.355 ++
1.356 ++To determine which registers to save, the macro can refer to the array
1.357 ++regs_ever_live: element r is nonzero if hard register
1.358 ++r is used anywhere within the function. This implies the function
1.359 ++prologue should save register r, provided it is not one of the
1.360 ++call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
1.361 ++regs_ever_live.)
1.362 ++
1.363 ++On machines that have ``register windows'', the function entry code does
1.364 ++not save on the stack the registers that are in the windows, even if
1.365 ++they are supposed to be preserved by function calls; instead it takes
1.366 ++appropriate steps to ``push'' the register stack, if any non-call-used
1.367 ++registers are used in the function.
1.368 ++
1.369 ++On machines where functions may or may not have frame-pointers, the
1.370 ++function entry code must vary accordingly; it must set up the frame
1.371 ++pointer if one is wanted, and not otherwise. To determine whether a
1.372 ++frame pointer is in wanted, the macro can refer to the variable
1.373 ++frame pointer is wanted, the macro can refer to the variable
1.374 ++time in a function that needs a frame pointer. (see Elimination).
1.375 ++
1.376 ++The function entry code is responsible for allocating any stack space
1.377 ++required for the function. This stack space consists of the regions
1.378 ++listed below. In most cases, these regions are allocated in the
1.379 ++order listed, with the last listed region closest to the top of the
1.380 ++stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
1.381 ++the highest address if it is not defined). You can use a different order
1.382 ++for a machine if doing so is more convenient or required for
1.383 ++compatibility reasons. Except in cases where required by standard
1.384 ++or by a debugger, there is no reason why the stack layout used by GCC
1.385 ++need agree with that used by other compilers for a machine.
1.386 ++*/
1.387 ++
1.388 ++#undef TARGET_ASM_FUNCTION_PROLOGUE
1.389 ++#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
1.390 ++
1.391 ++
1.392 ++#undef TARGET_DEFAULT_SHORT_ENUMS
1.393 ++#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
1.394 ++
1.395 ++#undef TARGET_PROMOTE_FUNCTION_ARGS
1.396 ++#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1.397 ++
1.398 ++#undef TARGET_PROMOTE_FUNCTION_RETURN
1.399 ++#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1.400 ++
1.401 ++#undef TARGET_PROMOTE_PROTOTYPES
1.402 ++#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1.403 ++
1.404 ++#undef TARGET_MUST_PASS_IN_STACK
1.405 ++#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
1.406 ++
1.407 ++#undef TARGET_PASS_BY_REFERENCE
1.408 ++#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
1.409 ++
1.410 ++#undef TARGET_STRICT_ARGUMENT_NAMING
1.411 ++#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
1.412 ++
1.413 ++#undef TARGET_VECTOR_MODE_SUPPORTED_P
1.414 ++#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
1.415 ++
1.416 ++#undef TARGET_RETURN_IN_MEMORY
1.417 ++#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
1.418 ++
1.419 ++#undef TARGET_RETURN_IN_MSB
1.420 ++#define TARGET_RETURN_IN_MSB avr32_return_in_msb
1.421 ++
1.422 ++#undef TARGET_ENCODE_SECTION_INFO
1.423 ++#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
1.424 ++
1.425 ++#undef TARGET_ARG_PARTIAL_BYTES
1.426 ++#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
1.427 ++
1.428 ++#undef TARGET_STRIP_NAME_ENCODING
1.429 ++#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
1.430 ++
1.431 ++#define streq(string1, string2) (strcmp (string1, string2) == 0)
1.432 ++
1.433 ++#undef TARGET_NARROW_VOLATILE_BITFIELD
1.434 ++#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
1.435 ++
1.436 ++#undef TARGET_ATTRIBUTE_TABLE
1.437 ++#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
1.438 ++
1.439 ++#undef TARGET_COMP_TYPE_ATTRIBUTES
1.440 ++#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
1.441 ++
1.442 ++
1.443 ++#undef TARGET_RTX_COSTS
1.444 ++#define TARGET_RTX_COSTS avr32_rtx_costs
1.445 ++
1.446 ++#undef TARGET_CANNOT_FORCE_CONST_MEM
1.447 ++#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
1.448 ++
1.449 ++#undef TARGET_ASM_INTEGER
1.450 ++#define TARGET_ASM_INTEGER avr32_assemble_integer
1.451 ++
1.452 ++#undef TARGET_FUNCTION_VALUE
1.453 ++#define TARGET_FUNCTION_VALUE avr32_function_value
1.454 ++
1.455 ++#undef TARGET_MIN_ANCHOR_OFFSET
1.456 ++#define TARGET_MIN_ANCHOR_OFFSET (0)
1.457 ++
1.458 ++#undef TARGET_MAX_ANCHOR_OFFSET
1.459 ++#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
1.460 ++
1.461 ++#undef TARGET_SECONDARY_RELOAD
1.462 ++#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
1.463 ++
1.464 ++enum reg_class
1.465 ++avr32_secondary_reload (bool in_p, rtx x, enum reg_class class ATTRIBUTE_UNUSED,
1.466 ++ enum machine_mode mode, secondary_reload_info *sri)
1.467 ++{
1.468 ++
1.469 ++ if ( avr32_rmw_memory_operand (x, mode) )
1.470 ++ {
1.471 ++ if (!in_p)
1.472 ++ sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
1.473 ++ else
1.474 ++ sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
1.475 ++ }
1.476 ++ return NO_REGS;
1.477 ++
1.478 ++}
1.479 ++
1.480 ++/*
1.481 ++ * Switches to the appropriate section for output of constant pool
1.482 ++ * entry x in mode. You can assume that x is some kind of constant in
1.483 ++ * RTL. The argument mode is redundant except in the case of a
1.484 ++ * const_int rtx. Select the section by calling readonly_data_section
1.485 ++ * or one of the alternatives for other sections. align is the
1.486 ++ * constant alignment in bits.
1.487 ++ *
1.488 ++ * The default version of this function takes care of putting symbolic
1.489 ++ * constants in flag_pic mode in data_section and everything else in
1.490 ++ * readonly_data_section.
1.491 ++ */
1.492 ++//#undef TARGET_ASM_SELECT_RTX_SECTION
1.493 ++//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
1.494 ++
1.495 ++
1.496 ++/*
1.497 ++ * If non-null, this hook performs a target-specific pass over the
1.498 ++ * instruction stream. The compiler will run it at all optimization
1.499 ++ * levels, just before the point at which it normally does
1.500 ++ * delayed-branch scheduling.
1.501 ++ *
1.502 ++ * The exact purpose of the hook varies from target to target. Some
1.503 ++ * use it to do transformations that are necessary for correctness,
1.504 ++ * such as laying out in-function constant pools or avoiding hardware
1.505 ++ * hazards. Others use it as an opportunity to do some
1.506 ++ * machine-dependent optimizations.
1.507 ++ *
1.508 ++ * You need not implement the hook if it has nothing to do. The
1.509 ++ * default definition is null.
1.510 ++ */
1.511 ++#undef TARGET_MACHINE_DEPENDENT_REORG
1.512 ++#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
1.513 ++
1.514 ++/* Target hook for assembling integer objects.
1.515 ++ Need to handle integer vectors */
1.516 ++static bool
1.517 ++avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
1.518 ++{
1.519 ++ if (avr32_vector_mode_supported (GET_MODE (x)))
1.520 ++ {
1.521 ++ int i, units;
1.522 ++
1.523 ++ if (GET_CODE (x) != CONST_VECTOR)
1.524 ++ abort ();
1.525 ++
1.526 ++ units = CONST_VECTOR_NUNITS (x);
1.527 ++
1.528 ++ switch (GET_MODE (x))
1.529 ++ {
1.530 ++ case V2HImode:
1.531 ++ size = 2;
1.532 ++ break;
1.533 ++ case V4QImode:
1.534 ++ size = 1;
1.535 ++ break;
1.536 ++ default:
1.537 ++ abort ();
1.538 ++ }
1.539 ++
1.540 ++ for (i = 0; i < units; i++)
1.541 ++ {
1.542 ++ rtx elt;
1.543 ++
1.544 ++ elt = CONST_VECTOR_ELT (x, i);
1.545 ++ assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
1.546 ++ }
1.547 ++
1.548 ++ return true;
1.549 ++ }
1.550 ++
1.551 ++ return default_assemble_integer (x, size, aligned_p);
1.552 ++}
1.553 ++
1.554 ++/*
1.555 ++ * This target hook describes the relative costs of RTL expressions.
1.556 ++ *
1.557 ++ * The cost may depend on the precise form of the expression, which is
1.558 ++ * available for examination in x, and the rtx code of the expression
1.559 ++ * in which it is contained, found in outer_code. code is the
1.560 ++ * expression code--redundant, since it can be obtained with GET_CODE
1.561 ++ * (x).
1.562 ++ *
1.563 ++ * In implementing this hook, you can use the construct COSTS_N_INSNS
1.564 ++ * (n) to specify a cost equal to n fast instructions.
1.565 ++ *
1.566 ++ * On entry to the hook, *total contains a default estimate for the
1.567 ++ * cost of the expression. The hook should modify this value as
1.568 ++ * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
1.569 ++ * for multiplications, COSTS_N_INSNS (7) for division and modulus
1.570 ++ * operations, and COSTS_N_INSNS (1) for all other operations.
1.571 ++ *
1.572 ++ * When optimizing for code size, i.e. when optimize_size is non-zero,
1.573 ++ * this target hook should be used to estimate the relative size cost
1.574 ++ * of an expression, again relative to COSTS_N_INSNS.
1.575 ++ *
1.576 ++ * The hook returns true when all subexpressions of x have been
1.577 ++ * processed, and false when rtx_cost should recurse.
1.578 ++ */
1.579 ++
1.580 ++/* Worker routine for avr32_rtx_costs. */
1.581 ++static inline int
1.582 ++avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
1.583 ++ enum rtx_code outer ATTRIBUTE_UNUSED)
1.584 ++{
1.585 ++ enum machine_mode mode = GET_MODE (x);
1.586 ++
1.587 ++ switch (GET_CODE (x))
1.588 ++ {
1.589 ++ case MEM:
1.590 ++ /* Using pre decrement / post increment memory operations on the
1.591 ++ avr32_uc architecture means that two writebacks must be performed
1.592 ++ and hence two cycles are needed. */
1.593 ++ if (!optimize_size
1.594 ++ && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
1.595 ++ && TARGET_ARCH_UC
1.596 ++ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
1.597 ++ || GET_CODE (XEXP (x, 0)) == POST_INC))
1.598 ++ return COSTS_N_INSNS (5);
1.599 ++
1.600 ++ /* Memory costs quite a lot for the first word, but subsequent words
1.601 ++ load at the equivalent of a single insn each. */
1.602 ++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
1.603 ++ return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
1.604 ++
1.605 ++ return COSTS_N_INSNS (4);
1.606 ++ case SYMBOL_REF:
1.607 ++ case CONST:
1.608 ++ /* These are valid for the pseudo insns: lda.w and call which operates
1.609 ++ on direct addresses. We assume that the cost of a lda.w is the same
1.610 ++ as the cost of a ld.w insn. */
1.611 ++ return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
1.612 ++ case DIV:
1.613 ++ case MOD:
1.614 ++ case UDIV:
1.615 ++ case UMOD:
1.616 ++ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
1.617 ++
1.618 ++ case ROTATE:
1.619 ++ case ROTATERT:
1.620 ++ if (mode == TImode)
1.621 ++ return COSTS_N_INSNS (100);
1.622 ++
1.623 ++ if (mode == DImode)
1.624 ++ return COSTS_N_INSNS (10);
1.625 ++ return COSTS_N_INSNS (4);
1.626 ++ case ASHIFT:
1.627 ++ case LSHIFTRT:
1.628 ++ case ASHIFTRT:
1.629 ++ case NOT:
1.630 ++ if (mode == TImode)
1.631 ++ return COSTS_N_INSNS (10);
1.632 ++
1.633 ++ if (mode == DImode)
1.634 ++ return COSTS_N_INSNS (4);
1.635 ++ return COSTS_N_INSNS (1);
1.636 ++ case PLUS:
1.637 ++ case MINUS:
1.638 ++ case NEG:
1.639 ++ case COMPARE:
1.640 ++ case ABS:
1.641 ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1.642 ++ return COSTS_N_INSNS (100);
1.643 ++
1.644 ++ if (mode == TImode)
1.645 ++ return COSTS_N_INSNS (50);
1.646 ++
1.647 ++ if (mode == DImode)
1.648 ++ return COSTS_N_INSNS (2);
1.649 ++ return COSTS_N_INSNS (1);
1.650 ++
1.651 ++ case MULT:
1.652 ++ {
1.653 ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1.654 ++ return COSTS_N_INSNS (300);
1.655 ++
1.656 ++ if (mode == TImode)
1.657 ++ return COSTS_N_INSNS (16);
1.658 ++
1.659 ++ if (mode == DImode)
1.660 ++ return COSTS_N_INSNS (4);
1.661 ++
1.662 ++ if (mode == HImode)
1.663 ++ return COSTS_N_INSNS (2);
1.664 ++
1.665 ++ return COSTS_N_INSNS (3);
1.666 ++ }
1.667 ++ case IF_THEN_ELSE:
1.668 ++ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
1.669 ++ return COSTS_N_INSNS (4);
1.670 ++ return COSTS_N_INSNS (1);
1.671 ++ case SIGN_EXTEND:
1.672 ++ case ZERO_EXTEND:
1.673 ++ /* Sign/Zero extensions of registers cost quite much since these
1.674 ++       instructions only take one register operand which means that gcc
1.675 ++       often must insert some move instructions */
1.676 ++ if (mode == QImode || mode == HImode)
1.677 ++ return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
1.678 ++ return COSTS_N_INSNS (4);
1.679 ++ case UNSPEC:
1.680 ++ /* divmod operations */
1.681 ++ if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
1.682 ++ || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
1.683 ++ {
1.684 ++ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
1.685 ++ }
1.686 ++ /* Fallthrough */
1.687 ++ default:
1.688 ++ return COSTS_N_INSNS (1);
1.689 ++ }
1.690 ++}
1.691 ++
1.692 ++static bool
1.693 ++avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
1.694 ++{
1.695 ++ *total = avr32_rtx_costs_1 (x, code, outer_code);
1.696 ++ return true;
1.697 ++}
1.698 ++
1.699 ++
1.700 ++bool
1.701 ++avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
1.702 ++{
1.703 ++ /* Do not want symbols in the constant pool when compiling pic or if using
1.704 ++ address pseudo instructions. */
1.705 ++ return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
1.706 ++ && avr32_find_symbol (x) != NULL_RTX);
1.707 ++}
1.708 ++
1.709 ++
1.710 ++/* Table of machine attributes. */
1.711 ++const struct attribute_spec avr32_attribute_table[] = {
1.712 ++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1.713 ++ /* Interrupt Service Routines have special prologue and epilogue
1.714 ++ requirements. */
1.715 ++ {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
1.716 ++ {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
1.717 ++ {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
1.718 ++ {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
1.719 ++ {"rmw_addressable", 0, 0, true, false, false, NULL},
1.720 ++ {NULL, 0, 0, false, false, false, NULL}
1.721 ++};
1.722 ++
1.723 ++
1.724 ++typedef struct
1.725 ++{
1.726 ++ const char *const arg;
1.727 ++ const unsigned long return_value;
1.728 ++}
1.729 ++isr_attribute_arg;
1.730 ++
1.731 ++static const isr_attribute_arg isr_attribute_args[] = {
1.732 ++ {"FULL", AVR32_FT_ISR_FULL},
1.733 ++ {"full", AVR32_FT_ISR_FULL},
1.734 ++ {"HALF", AVR32_FT_ISR_HALF},
1.735 ++ {"half", AVR32_FT_ISR_HALF},
1.736 ++ {"NONE", AVR32_FT_ISR_NONE},
1.737 ++ {"none", AVR32_FT_ISR_NONE},
1.738 ++ {"UNDEF", AVR32_FT_ISR_NONE},
1.739 ++ {"undef", AVR32_FT_ISR_NONE},
1.740 ++ {"SWI", AVR32_FT_ISR_NONE},
1.741 ++ {"swi", AVR32_FT_ISR_NONE},
1.742 ++ {NULL, AVR32_FT_ISR_NONE}
1.743 ++};
1.744 ++
1.745 ++/* Returns the (interrupt) function type of the current
1.746 ++ function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
1.747 ++
1.748 ++static unsigned long
1.749 ++avr32_isr_value (tree argument)
1.750 ++{
1.751 ++ const isr_attribute_arg *ptr;
1.752 ++ const char *arg;
1.753 ++
1.754 ++ /* No argument - default to ISR_NONE. */
1.755 ++ if (argument == NULL_TREE)
1.756 ++ return AVR32_FT_ISR_NONE;
1.757 ++
1.758 ++ /* Get the value of the argument. */
1.759 ++ if (TREE_VALUE (argument) == NULL_TREE
1.760 ++ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1.761 ++ return AVR32_FT_UNKNOWN;
1.762 ++
1.763 ++ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1.764 ++
1.765 ++ /* Check it against the list of known arguments. */
1.766 ++ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1.767 ++ if (streq (arg, ptr->arg))
1.768 ++ return ptr->return_value;
1.769 ++
1.770 ++ /* An unrecognized interrupt type. */
1.771 ++ return AVR32_FT_UNKNOWN;
1.772 ++}
1.773 ++
1.774 ++
1.775 ++
1.776 ++/*
1.777 ++These hooks specify assembly directives for creating certain kinds
1.778 ++of integer object. The TARGET_ASM_BYTE_OP directive creates a
1.779 ++byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
1.780 ++aligned two-byte object, and so on. Any of the hooks may be
1.781 ++NULL, indicating that no suitable directive is available.
1.782 ++
1.783 ++The compiler will print these strings at the start of a new line,
1.784 ++followed immediately by the object's initial value. In most cases,
1.785 ++the string should contain a tab, a pseudo-op, and then another tab.
1.786 ++*/
1.787 ++#undef TARGET_ASM_BYTE_OP
1.788 ++#define TARGET_ASM_BYTE_OP "\t.byte\t"
1.789 ++#undef TARGET_ASM_ALIGNED_HI_OP
1.790 ++#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
1.791 ++#undef TARGET_ASM_ALIGNED_SI_OP
1.792 ++#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
1.793 ++#undef TARGET_ASM_ALIGNED_DI_OP
1.794 ++#define TARGET_ASM_ALIGNED_DI_OP NULL
1.795 ++#undef TARGET_ASM_ALIGNED_TI_OP
1.796 ++#define TARGET_ASM_ALIGNED_TI_OP NULL
1.797 ++#undef TARGET_ASM_UNALIGNED_HI_OP
1.798 ++#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1.799 ++#undef TARGET_ASM_UNALIGNED_SI_OP
1.800 ++#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
1.801 ++#undef TARGET_ASM_UNALIGNED_DI_OP
1.802 ++#define TARGET_ASM_UNALIGNED_DI_OP NULL
1.803 ++#undef TARGET_ASM_UNALIGNED_TI_OP
1.804 ++#define TARGET_ASM_UNALIGNED_TI_OP NULL
1.805 ++
1.806 ++#undef TARGET_ASM_OUTPUT_MI_THUNK
1.807 ++#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
1.808 ++
1.809 ++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1.810 ++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1.811 ++
1.812 ++static void
1.813 ++avr32_output_mi_thunk (FILE * file,
1.814 ++ tree thunk ATTRIBUTE_UNUSED,
1.815 ++ HOST_WIDE_INT delta,
1.816 ++ HOST_WIDE_INT vcall_offset, tree function)
1.817 ++ {
1.818 ++ int mi_delta = delta;
1.819 ++ int this_regno =
1.820 ++ (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
1.821 ++ INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
1.822 ++
1.823 ++
1.824 ++ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
1.825 ++ || vcall_offset)
1.826 ++ {
1.827 ++ fputs ("\tpushm\tlr\n", file);
1.828 ++ }
1.829 ++
1.830 ++
1.831 ++ if (mi_delta != 0)
1.832 ++ {
1.833 ++ if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
1.834 ++ {
1.835 ++ fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
1.836 ++ }
1.837 ++ else
1.838 ++ {
1.839 ++ /* Immediate is larger than k21 we must make us a temp register by
1.840 ++ pushing a register to the stack. */
1.841 ++ fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
1.842 ++ fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
1.843 ++ fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
1.844 ++ }
1.845 ++ }
1.846 ++
1.847 ++
1.848 ++ if (vcall_offset != 0)
1.849 ++ {
1.850 ++ fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
1.851 ++ fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
1.852 ++ fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
1.853 ++ }
1.854 ++
1.855 ++
1.856 ++ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
1.857 ++ || vcall_offset)
1.858 ++ {
1.859 ++ fputs ("\tpopm\tlr\n", file);
1.860 ++ }
1.861 ++
1.862 ++ /* Jump to the function. We assume that we can use an rjmp since the
1.863 ++ function to jump to is local and probably not too far away from
1.864 ++ the thunk. If this assumption proves to be wrong we could implement
1.865 ++ this jump by calculating the offset between the jump source and destination
1.866 ++ and put this in the constant pool and then perform an add to pc.
1.867 ++ This would also be legitimate PIC code. But for now we hope that an rjmp
1.868 ++ will be sufficient...
1.869 ++ */
1.870 ++ fputs ("\trjmp\t", file);
1.871 ++ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
1.872 ++ fputc ('\n', file);
1.873 ++ }
1.874 ++
1.875 ++
1.876 ++/* Implements target hook vector_mode_supported. */
1.877 ++bool
1.878 ++avr32_vector_mode_supported (enum machine_mode mode)
1.879 ++{
1.880 ++ if ((mode == V2HImode) || (mode == V4QImode))
1.881 ++ return true;
1.882 ++
1.883 ++ return false;
1.884 ++}
1.885 ++
1.886 ++
1.887 ++#undef TARGET_INIT_LIBFUNCS
1.888 ++#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
1.889 ++
1.890 ++#undef TARGET_INIT_BUILTINS
1.891 ++#define TARGET_INIT_BUILTINS avr32_init_builtins
1.892 ++
1.893 ++#undef TARGET_EXPAND_BUILTIN
1.894 ++#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
1.895 ++
1.896 ++tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
1.897 ++ void_ftype_ptr_int;
1.898 ++tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
1.899 ++tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
1.900 ++ short_ftype_short_short;
1.901 ++tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
1.902 ++tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
1.903 ++tree longlong_ftype_int_int, void_ftype_int_int_longlong;
1.904 ++tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
1.905 ++tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
1.906 ++
1.907 ++#define def_builtin(NAME, TYPE, CODE) \
1.908 ++ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
1.909 ++ BUILT_IN_MD, NULL, NULL_TREE)
1.910 ++
1.911 ++#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
1.912 ++ do \
1.913 ++ { \
1.914 ++ if ((MASK)) \
1.915 ++ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
1.916 ++ BUILT_IN_MD, NULL, NULL_TREE); \
1.917 ++ } \
1.918 ++ while (0)
1.919 ++
1.920 ++struct builtin_description
1.921 ++{
1.922 ++ const unsigned int mask;
1.923 ++ const enum insn_code icode;
1.924 ++ const char *const name;
1.925 ++ const int code;
1.926 ++ const enum rtx_code comparison;
1.927 ++ const unsigned int flag;
1.928 ++ const tree *ftype;
1.929 ++};
1.930 ++
1.931 ++static const struct builtin_description bdesc_2arg[] = {
1.932 ++#define DSP_BUILTIN(code, builtin, ftype) \
1.933 ++ { 1, CODE_FOR_##code, "__builtin_" #code , \
1.934 ++ AVR32_BUILTIN_##builtin, 0, 0, ftype }
1.935 ++
1.936 ++ DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
1.937 ++ DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
1.938 ++ DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
1.939 ++ DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
1.940 ++ DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
1.941 ++ DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
1.942 ++ DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
1.943 ++ DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
1.944 ++ DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
1.945 ++ DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
1.946 ++ DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
1.947 ++};
1.948 ++
1.949 ++
1.950 ++void
1.951 ++avr32_init_builtins (void)
1.952 ++{
1.953 ++ unsigned int i;
1.954 ++ const struct builtin_description *d;
1.955 ++ tree endlink = void_list_node;
1.956 ++ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
1.957 ++ tree longlong_endlink =
1.958 ++ tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
1.959 ++ tree short_endlink =
1.960 ++ tree_cons (NULL_TREE, short_integer_type_node, endlink);
1.961 ++ tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
1.962 ++
1.963 ++ /* int func (int) */
1.964 ++ int_ftype_int = build_function_type (integer_type_node, int_endlink);
1.965 ++
1.966 ++ /* short func (short) */
1.967 ++ short_ftype_short
1.968 ++ = build_function_type (short_integer_type_node, short_endlink);
1.969 ++
1.970 ++ /* short func (short, short) */
1.971 ++ short_ftype_short_short
1.972 ++ = build_function_type (short_integer_type_node,
1.973 ++ tree_cons (NULL_TREE, short_integer_type_node,
1.974 ++ short_endlink));
1.975 ++
1.976 ++ /* long long func (long long, short, short) */
1.977 ++ longlong_ftype_longlong_short_short
1.978 ++ = build_function_type (long_long_integer_type_node,
1.979 ++ tree_cons (NULL_TREE, long_long_integer_type_node,
1.980 ++ tree_cons (NULL_TREE,
1.981 ++ short_integer_type_node,
1.982 ++ short_endlink)));
1.983 ++
1.984 ++ /* long long func (short, short) */
1.985 ++ longlong_ftype_short_short
1.986 ++ = build_function_type (long_long_integer_type_node,
1.987 ++ tree_cons (NULL_TREE, short_integer_type_node,
1.988 ++ short_endlink));
1.989 ++
1.990 ++ /* int func (int, int) */
1.991 ++ int_ftype_int_int
1.992 ++ = build_function_type (integer_type_node,
1.993 ++ tree_cons (NULL_TREE, integer_type_node,
1.994 ++ int_endlink));
1.995 ++
1.996 ++ /* long long func (int, int) */
1.997 ++ longlong_ftype_int_int
1.998 ++ = build_function_type (long_long_integer_type_node,
1.999 ++ tree_cons (NULL_TREE, integer_type_node,
1.1000 ++ int_endlink));
1.1001 ++
1.1002 ++ /* long long int func (long long, int, short) */
1.1003 ++ longlong_ftype_longlong_int_short
1.1004 ++ = build_function_type (long_long_integer_type_node,
1.1005 ++ tree_cons (NULL_TREE, long_long_integer_type_node,
1.1006 ++ tree_cons (NULL_TREE, integer_type_node,
1.1007 ++ short_endlink)));
1.1008 ++
1.1009 ++ /* long long int func (int, short) */
1.1010 ++ longlong_ftype_int_short
1.1011 ++ = build_function_type (long_long_integer_type_node,
1.1012 ++ tree_cons (NULL_TREE, integer_type_node,
1.1013 ++ short_endlink));
1.1014 ++
1.1015 ++ /* int func (int, short, short) */
1.1016 ++ int_ftype_int_short_short
1.1017 ++ = build_function_type (integer_type_node,
1.1018 ++ tree_cons (NULL_TREE, integer_type_node,
1.1019 ++ tree_cons (NULL_TREE,
1.1020 ++ short_integer_type_node,
1.1021 ++ short_endlink)));
1.1022 ++
1.1023 ++ /* int func (short, short) */
1.1024 ++ int_ftype_short_short
1.1025 ++ = build_function_type (integer_type_node,
1.1026 ++ tree_cons (NULL_TREE, short_integer_type_node,
1.1027 ++ short_endlink));
1.1028 ++
1.1029 ++ /* int func (int, short) */
1.1030 ++ int_ftype_int_short
1.1031 ++ = build_function_type (integer_type_node,
1.1032 ++ tree_cons (NULL_TREE, integer_type_node,
1.1033 ++ short_endlink));
1.1034 ++
1.1035 ++ /* void func (int, int) */
1.1036 ++ void_ftype_int_int
1.1037 ++ = build_function_type (void_type_node,
1.1038 ++ tree_cons (NULL_TREE, integer_type_node,
1.1039 ++ int_endlink));
1.1040 ++
1.1041 ++ /* void func (int, int, int) */
1.1042 ++ void_ftype_int_int_int
1.1043 ++ = build_function_type (void_type_node,
1.1044 ++ tree_cons (NULL_TREE, integer_type_node,
1.1045 ++ tree_cons (NULL_TREE, integer_type_node,
1.1046 ++ int_endlink)));
1.1047 ++
1.1048 ++ /* void func (int, int, long long) */
1.1049 ++ void_ftype_int_int_longlong
1.1050 ++ = build_function_type (void_type_node,
1.1051 ++ tree_cons (NULL_TREE, integer_type_node,
1.1052 ++ tree_cons (NULL_TREE, integer_type_node,
1.1053 ++ longlong_endlink)));
1.1054 ++
1.1055 ++ /* void func (int, int, int, int, int) */
1.1056 ++ void_ftype_int_int_int_int_int
1.1057 ++ = build_function_type (void_type_node,
1.1058 ++ tree_cons (NULL_TREE, integer_type_node,
1.1059 ++ tree_cons (NULL_TREE, integer_type_node,
1.1060 ++ tree_cons (NULL_TREE,
1.1061 ++ integer_type_node,
1.1062 ++ tree_cons
1.1063 ++ (NULL_TREE,
1.1064 ++ integer_type_node,
1.1065 ++ int_endlink)))));
1.1066 ++
1.1067 ++ /* void func (void *, int) */
1.1068 ++ void_ftype_ptr_int
1.1069 ++ = build_function_type (void_type_node,
1.1070 ++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1.1071 ++
1.1072 ++ /* void func (int) */
1.1073 ++ void_ftype_int = build_function_type (void_type_node, int_endlink);
1.1074 ++
1.1075 ++ /* void func (void) */
1.1076 ++ void_ftype_void = build_function_type (void_type_node, void_endlink);
1.1077 ++
1.1078 ++ /* int func (void) */
1.1079 ++ int_ftype_void = build_function_type (integer_type_node, void_endlink);
1.1080 ++
1.1081 ++ /* int func (void *, int) */
1.1082 ++ int_ftype_ptr_int
1.1083 ++ = build_function_type (integer_type_node,
1.1084 ++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1.1085 ++
1.1086 ++ /* int func (int, int, int) */
1.1087 ++ int_ftype_int_int_int
1.1088 ++ = build_function_type (integer_type_node,
1.1089 ++ tree_cons (NULL_TREE, integer_type_node,
1.1090 ++ tree_cons (NULL_TREE, integer_type_node,
1.1091 ++ int_endlink)));
1.1092 ++
1.1093 ++ /* Initialize avr32 builtins. */
1.1094 ++ def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
1.1095 ++ def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
1.1096 ++ def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
1.1097 ++ def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
1.1098 ++ def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
1.1099 ++ def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
1.1100 ++ def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
1.1101 ++ def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
1.1102 ++ def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
1.1103 ++ def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
1.1104 ++ def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
1.1105 ++ def_builtin ("__builtin_breakpoint", void_ftype_void,
1.1106 ++ AVR32_BUILTIN_BREAKPOINT);
1.1107 ++ def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
1.1108 ++ def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
1.1109 ++ def_builtin ("__builtin_bswap_16", short_ftype_short,
1.1110 ++ AVR32_BUILTIN_BSWAP16);
1.1111 ++ def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
1.1112 ++ def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
1.1113 ++ AVR32_BUILTIN_COP);
1.1114 ++ def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
1.1115 ++ def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
1.1116 ++ AVR32_BUILTIN_MVRC_W);
1.1117 ++ def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
1.1118 ++ AVR32_BUILTIN_MVCR_D);
1.1119 ++ def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
1.1120 ++ AVR32_BUILTIN_MVRC_D);
1.1121 ++ def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
1.1122 ++ def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
1.1123 ++ def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
1.1124 ++ AVR32_BUILTIN_SATRNDS);
1.1125 ++ def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
1.1126 ++ AVR32_BUILTIN_SATRNDU);
1.1127 ++ def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
1.1128 ++ def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
1.1129 ++ def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
1.1130 ++ AVR32_BUILTIN_MACSATHH_W);
1.1131 ++ def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
1.1132 ++ AVR32_BUILTIN_MACWH_D);
1.1133 ++ def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
1.1134 ++ AVR32_BUILTIN_MACHH_D);
1.1135 ++ def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
1.1136 ++ def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
1.1137 ++ def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
1.1138 ++
1.1139 ++ /* Add all builtins that are more or less simple operations on two
1.1140 ++ operands. */
1.1141 ++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1.1142 ++ {
1.1143 ++ /* Use one of the operands; the target can have a different mode for
1.1144 ++ mask-generating compares. */
1.1145 ++
1.1146 ++ if (d->name == 0)
1.1147 ++ continue;
1.1148 ++
1.1149 ++ def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
1.1150 ++ }
1.1151 ++}
1.1152 ++
1.1153 ++
1.1154 ++/* Subroutine of avr32_expand_builtin to take care of binop insns. */
1.1155 ++
1.1156 ++static rtx
1.1157 ++avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
1.1158 ++{
1.1159 ++ rtx pat;
1.1160 ++ tree arg0 = TREE_VALUE (arglist);
1.1161 ++ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1162 ++ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1163 ++ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1164 ++ enum machine_mode tmode = insn_data[icode].operand[0].mode;
1.1165 ++ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1.1166 ++ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1.1167 ++
1.1168 ++ if (!target
1.1169 ++ || GET_MODE (target) != tmode
1.1170 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1171 ++ target = gen_reg_rtx (tmode);
1.1172 ++
1.1173 ++ /* In case the insn wants input operands in modes different from the
1.1174 ++ result, abort. */
1.1175 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1176 ++ {
1.1177 ++ /* If op0 is already a reg we must cast it to the correct mode. */
1.1178 ++ if (REG_P (op0))
1.1179 ++ op0 = convert_to_mode (mode0, op0, 1);
1.1180 ++ else
1.1181 ++ op0 = copy_to_mode_reg (mode0, op0);
1.1182 ++ }
1.1183 ++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1.1184 ++ {
1.1185 ++ /* If op1 is already a reg we must cast it to the correct mode. */
1.1186 ++ if (REG_P (op1))
1.1187 ++ op1 = convert_to_mode (mode1, op1, 1);
1.1188 ++ else
1.1189 ++ op1 = copy_to_mode_reg (mode1, op1);
1.1190 ++ }
1.1191 ++ pat = GEN_FCN (icode) (target, op0, op1);
1.1192 ++ if (!pat)
1.1193 ++ return 0;
1.1194 ++ emit_insn (pat);
1.1195 ++ return target;
1.1196 ++}
1.1197 ++
1.1198 ++/* Expand an expression EXP that calls a built-in function,
1.1199 ++ with result going to TARGET if that's convenient
1.1200 ++ (and in mode MODE if that's convenient).
1.1201 ++ SUBTARGET may be used as the target for computing one of EXP's operands.
1.1202 ++ IGNORE is nonzero if the value is to be ignored. */
1.1203 ++
1.1204 ++rtx
1.1205 ++avr32_expand_builtin (tree exp,
1.1206 ++ rtx target,
1.1207 ++ rtx subtarget ATTRIBUTE_UNUSED,
1.1208 ++ enum machine_mode mode ATTRIBUTE_UNUSED,
1.1209 ++ int ignore ATTRIBUTE_UNUSED)
1.1210 ++{
1.1211 ++ const struct builtin_description *d;
1.1212 ++ unsigned int i;
1.1213 ++ enum insn_code icode = 0;
1.1214 ++ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
1.1215 ++ tree arglist = TREE_OPERAND (exp, 1);
1.1216 ++ tree arg0, arg1, arg2;
1.1217 ++ rtx op0, op1, op2, pat;
1.1218 ++ enum machine_mode tmode, mode0, mode1;
1.1219 ++ enum machine_mode arg0_mode;
1.1220 ++ int fcode = DECL_FUNCTION_CODE (fndecl);
1.1221 ++
1.1222 ++ switch (fcode)
1.1223 ++ {
1.1224 ++ default:
1.1225 ++ break;
1.1226 ++
1.1227 ++ case AVR32_BUILTIN_SATS:
1.1228 ++ case AVR32_BUILTIN_SATU:
1.1229 ++ case AVR32_BUILTIN_SATRNDS:
1.1230 ++ case AVR32_BUILTIN_SATRNDU:
1.1231 ++ {
1.1232 ++ const char *fname;
1.1233 ++ switch (fcode)
1.1234 ++ {
1.1235 ++ default:
1.1236 ++ case AVR32_BUILTIN_SATS:
1.1237 ++ icode = CODE_FOR_sats;
1.1238 ++ fname = "sats";
1.1239 ++ break;
1.1240 ++ case AVR32_BUILTIN_SATU:
1.1241 ++ icode = CODE_FOR_satu;
1.1242 ++ fname = "satu";
1.1243 ++ break;
1.1244 ++ case AVR32_BUILTIN_SATRNDS:
1.1245 ++ icode = CODE_FOR_satrnds;
1.1246 ++ fname = "satrnds";
1.1247 ++ break;
1.1248 ++ case AVR32_BUILTIN_SATRNDU:
1.1249 ++ icode = CODE_FOR_satrndu;
1.1250 ++ fname = "satrndu";
1.1251 ++ break;
1.1252 ++ }
1.1253 ++
1.1254 ++ arg0 = TREE_VALUE (arglist);
1.1255 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1256 ++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1.1257 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1258 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1259 ++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1.1260 ++
1.1261 ++ tmode = insn_data[icode].operand[0].mode;
1.1262 ++
1.1263 ++
1.1264 ++ if (target == 0
1.1265 ++ || GET_MODE (target) != tmode
1.1266 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1267 ++ target = gen_reg_rtx (tmode);
1.1268 ++
1.1269 ++
1.1270 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
1.1271 ++ {
1.1272 ++ op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
1.1273 ++ }
1.1274 ++
1.1275 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1.1276 ++ {
1.1277 ++ error ("Parameter 2 to __builtin_%s should be a constant number.",
1.1278 ++ fname);
1.1279 ++ return NULL_RTX;
1.1280 ++ }
1.1281 ++
1.1282 ++ if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
1.1283 ++ {
1.1284 ++ error ("Parameter 3 to __builtin_%s should be a constant number.",
1.1285 ++ fname);
1.1286 ++ return NULL_RTX;
1.1287 ++ }
1.1288 ++
1.1289 ++ emit_move_insn (target, op0);
1.1290 ++ pat = GEN_FCN (icode) (target, op1, op2);
1.1291 ++ if (!pat)
1.1292 ++ return 0;
1.1293 ++ emit_insn (pat);
1.1294 ++
1.1295 ++ return target;
1.1296 ++ }
1.1297 ++ case AVR32_BUILTIN_MUSTR:
1.1298 ++ icode = CODE_FOR_mustr;
1.1299 ++ tmode = insn_data[icode].operand[0].mode;
1.1300 ++
1.1301 ++ if (target == 0
1.1302 ++ || GET_MODE (target) != tmode
1.1303 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1304 ++ target = gen_reg_rtx (tmode);
1.1305 ++ pat = GEN_FCN (icode) (target);
1.1306 ++ if (!pat)
1.1307 ++ return 0;
1.1308 ++ emit_insn (pat);
1.1309 ++ return target;
1.1310 ++
1.1311 ++ case AVR32_BUILTIN_MFSR:
1.1312 ++ icode = CODE_FOR_mfsr;
1.1313 ++ arg0 = TREE_VALUE (arglist);
1.1314 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1315 ++ tmode = insn_data[icode].operand[0].mode;
1.1316 ++ mode0 = insn_data[icode].operand[1].mode;
1.1317 ++
1.1318 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1319 ++ {
1.1320 ++ error ("Parameter 1 to __builtin_mfsr must be a constant number");
1.1321 ++ }
1.1322 ++
1.1323 ++ if (target == 0
1.1324 ++ || GET_MODE (target) != tmode
1.1325 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1326 ++ target = gen_reg_rtx (tmode);
1.1327 ++ pat = GEN_FCN (icode) (target, op0);
1.1328 ++ if (!pat)
1.1329 ++ return 0;
1.1330 ++ emit_insn (pat);
1.1331 ++ return target;
1.1332 ++ case AVR32_BUILTIN_MTSR:
1.1333 ++ icode = CODE_FOR_mtsr;
1.1334 ++ arg0 = TREE_VALUE (arglist);
1.1335 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1336 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1337 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1338 ++ mode0 = insn_data[icode].operand[0].mode;
1.1339 ++ mode1 = insn_data[icode].operand[1].mode;
1.1340 ++
1.1341 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1.1342 ++ {
1.1343 ++ error ("Parameter 1 to __builtin_mtsr must be a constant number");
1.1344 ++ return gen_reg_rtx (mode0);
1.1345 ++ }
1.1346 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1.1347 ++ op1 = copy_to_mode_reg (mode1, op1);
1.1348 ++ pat = GEN_FCN (icode) (op0, op1);
1.1349 ++ if (!pat)
1.1350 ++ return 0;
1.1351 ++ emit_insn (pat);
1.1352 ++ return NULL_RTX;
1.1353 ++ case AVR32_BUILTIN_MFDR:
1.1354 ++ icode = CODE_FOR_mfdr;
1.1355 ++ arg0 = TREE_VALUE (arglist);
1.1356 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1357 ++ tmode = insn_data[icode].operand[0].mode;
1.1358 ++ mode0 = insn_data[icode].operand[1].mode;
1.1359 ++
1.1360 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1361 ++ {
1.1362 ++ error ("Parameter 1 to __builtin_mfdr must be a constant number");
1.1363 ++ }
1.1364 ++
1.1365 ++ if (target == 0
1.1366 ++ || GET_MODE (target) != tmode
1.1367 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1368 ++ target = gen_reg_rtx (tmode);
1.1369 ++ pat = GEN_FCN (icode) (target, op0);
1.1370 ++ if (!pat)
1.1371 ++ return 0;
1.1372 ++ emit_insn (pat);
1.1373 ++ return target;
1.1374 ++ case AVR32_BUILTIN_MTDR:
1.1375 ++ icode = CODE_FOR_mtdr;
1.1376 ++ arg0 = TREE_VALUE (arglist);
1.1377 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1378 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1379 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1380 ++ mode0 = insn_data[icode].operand[0].mode;
1.1381 ++ mode1 = insn_data[icode].operand[1].mode;
1.1382 ++
1.1383 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1.1384 ++ {
1.1385 ++ error ("Parameter 1 to __builtin_mtdr must be a constant number");
1.1386 ++ return gen_reg_rtx (mode0);
1.1387 ++ }
1.1388 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1.1389 ++ op1 = copy_to_mode_reg (mode1, op1);
1.1390 ++ pat = GEN_FCN (icode) (op0, op1);
1.1391 ++ if (!pat)
1.1392 ++ return 0;
1.1393 ++ emit_insn (pat);
1.1394 ++ return NULL_RTX;
1.1395 ++ case AVR32_BUILTIN_CACHE:
1.1396 ++ icode = CODE_FOR_cache;
1.1397 ++ arg0 = TREE_VALUE (arglist);
1.1398 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1399 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1400 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1401 ++ mode0 = insn_data[icode].operand[0].mode;
1.1402 ++ mode1 = insn_data[icode].operand[1].mode;
1.1403 ++
1.1404 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1.1405 ++ {
1.1406 ++ error ("Parameter 2 to __builtin_cache must be a constant number");
1.1407 ++ return gen_reg_rtx (mode1);
1.1408 ++ }
1.1409 ++
1.1410 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1.1411 ++ op0 = copy_to_mode_reg (mode0, op0);
1.1412 ++
1.1413 ++ pat = GEN_FCN (icode) (op0, op1);
1.1414 ++ if (!pat)
1.1415 ++ return 0;
1.1416 ++ emit_insn (pat);
1.1417 ++ return NULL_RTX;
1.1418 ++ case AVR32_BUILTIN_SYNC:
1.1419 ++ case AVR32_BUILTIN_MUSFR:
1.1420 ++ case AVR32_BUILTIN_SSRF:
1.1421 ++ case AVR32_BUILTIN_CSRF:
1.1422 ++ {
1.1423 ++ const char *fname;
1.1424 ++ switch (fcode)
1.1425 ++ {
1.1426 ++ default:
1.1427 ++ case AVR32_BUILTIN_SYNC:
1.1428 ++ icode = CODE_FOR_sync;
1.1429 ++ fname = "sync";
1.1430 ++ break;
1.1431 ++ case AVR32_BUILTIN_MUSFR:
1.1432 ++ icode = CODE_FOR_musfr;
1.1433 ++ fname = "musfr";
1.1434 ++ break;
1.1435 ++ case AVR32_BUILTIN_SSRF:
1.1436 ++ icode = CODE_FOR_ssrf;
1.1437 ++ fname = "ssrf";
1.1438 ++ break;
1.1439 ++ case AVR32_BUILTIN_CSRF:
1.1440 ++ icode = CODE_FOR_csrf;
1.1441 ++ fname = "csrf";
1.1442 ++ break;
1.1443 ++ }
1.1444 ++
1.1445 ++ arg0 = TREE_VALUE (arglist);
1.1446 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1447 ++ mode0 = insn_data[icode].operand[0].mode;
1.1448 ++
1.1449 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1.1450 ++ {
1.1451 ++ if (icode == CODE_FOR_musfr)
1.1452 ++ op0 = copy_to_mode_reg (mode0, op0);
1.1453 ++ else
1.1454 ++ {
1.1455 ++ error ("Parameter to __builtin_%s is illegal.", fname);
1.1456 ++ return gen_reg_rtx (mode0);
1.1457 ++ }
1.1458 ++ }
1.1459 ++ pat = GEN_FCN (icode) (op0);
1.1460 ++ if (!pat)
1.1461 ++ return 0;
1.1462 ++ emit_insn (pat);
1.1463 ++ return NULL_RTX;
1.1464 ++ }
1.1465 ++ case AVR32_BUILTIN_TLBR:
1.1466 ++ icode = CODE_FOR_tlbr;
1.1467 ++ pat = GEN_FCN (icode) (NULL_RTX);
1.1468 ++ if (!pat)
1.1469 ++ return 0;
1.1470 ++ emit_insn (pat);
1.1471 ++ return NULL_RTX;
1.1472 ++ case AVR32_BUILTIN_TLBS:
1.1473 ++ icode = CODE_FOR_tlbs;
1.1474 ++ pat = GEN_FCN (icode) (NULL_RTX);
1.1475 ++ if (!pat)
1.1476 ++ return 0;
1.1477 ++ emit_insn (pat);
1.1478 ++ return NULL_RTX;
1.1479 ++ case AVR32_BUILTIN_TLBW:
1.1480 ++ icode = CODE_FOR_tlbw;
1.1481 ++ pat = GEN_FCN (icode) (NULL_RTX);
1.1482 ++ if (!pat)
1.1483 ++ return 0;
1.1484 ++ emit_insn (pat);
1.1485 ++ return NULL_RTX;
1.1486 ++ case AVR32_BUILTIN_BREAKPOINT:
1.1487 ++ icode = CODE_FOR_breakpoint;
1.1488 ++ pat = GEN_FCN (icode) (NULL_RTX);
1.1489 ++ if (!pat)
1.1490 ++ return 0;
1.1491 ++ emit_insn (pat);
1.1492 ++ return NULL_RTX;
1.1493 ++ case AVR32_BUILTIN_XCHG:
1.1494 ++ icode = CODE_FOR_sync_lock_test_and_setsi;
1.1495 ++ arg0 = TREE_VALUE (arglist);
1.1496 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1497 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1498 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1499 ++ tmode = insn_data[icode].operand[0].mode;
1.1500 ++ mode0 = insn_data[icode].operand[1].mode;
1.1501 ++ mode1 = insn_data[icode].operand[2].mode;
1.1502 ++
1.1503 ++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1.1504 ++ {
1.1505 ++ op1 = copy_to_mode_reg (mode1, op1);
1.1506 ++ }
1.1507 ++
1.1508 ++ op0 = force_reg (GET_MODE (op0), op0);
1.1509 ++ op0 = gen_rtx_MEM (GET_MODE (op0), op0);
1.1510 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1511 ++ {
1.1512 ++ error
1.1513 ++ ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
1.1514 ++ }
1.1515 ++
1.1516 ++ if (target == 0
1.1517 ++ || GET_MODE (target) != tmode
1.1518 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1519 ++ target = gen_reg_rtx (tmode);
1.1520 ++ pat = GEN_FCN (icode) (target, op0, op1);
1.1521 ++ if (!pat)
1.1522 ++ return 0;
1.1523 ++ emit_insn (pat);
1.1524 ++ return target;
1.1525 ++ case AVR32_BUILTIN_LDXI:
1.1526 ++ icode = CODE_FOR_ldxi;
1.1527 ++ arg0 = TREE_VALUE (arglist);
1.1528 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1529 ++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1.1530 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1531 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1532 ++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1.1533 ++ tmode = insn_data[icode].operand[0].mode;
1.1534 ++ mode0 = insn_data[icode].operand[1].mode;
1.1535 ++ mode1 = insn_data[icode].operand[2].mode;
1.1536 ++
1.1537 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1538 ++ {
1.1539 ++ op0 = copy_to_mode_reg (mode0, op0);
1.1540 ++ }
1.1541 ++
1.1542 ++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1.1543 ++ {
1.1544 ++ op1 = copy_to_mode_reg (mode1, op1);
1.1545 ++ }
1.1546 ++
1.1547 ++ if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
1.1548 ++ {
1.1549 ++ error
1.1550 ++ ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
1.1551 ++ return gen_reg_rtx (mode0);
1.1552 ++ }
1.1553 ++
1.1554 ++ if (target == 0
1.1555 ++ || GET_MODE (target) != tmode
1.1556 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1557 ++ target = gen_reg_rtx (tmode);
1.1558 ++ pat = GEN_FCN (icode) (target, op0, op1, op2);
1.1559 ++ if (!pat)
1.1560 ++ return 0;
1.1561 ++ emit_insn (pat);
1.1562 ++ return target;
1.1563 ++ case AVR32_BUILTIN_BSWAP16:
1.1564 ++ {
1.1565 ++ icode = CODE_FOR_bswap_16;
1.1566 ++ arg0 = TREE_VALUE (arglist);
1.1567 ++ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
1.1568 ++ mode0 = insn_data[icode].operand[1].mode;
1.1569 ++ if (arg0_mode != mode0)
1.1570 ++ arg0 = build1 (NOP_EXPR,
1.1571 ++ (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
1.1572 ++
1.1573 ++ op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
1.1574 ++ tmode = insn_data[icode].operand[0].mode;
1.1575 ++
1.1576 ++
1.1577 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1578 ++ {
1.1579 ++ if ( CONST_INT_P (op0) )
1.1580 ++ {
1.1581 ++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
1.1582 ++ ((INTVAL (op0)&0xff00) >> 8) );
1.1583 ++ /* Sign extend 16-bit value to host wide int */
1.1584 ++ val <<= (HOST_BITS_PER_WIDE_INT - 16);
1.1585 ++ val >>= (HOST_BITS_PER_WIDE_INT - 16);
1.1586 ++ op0 = GEN_INT(val);
1.1587 ++ if (target == 0
1.1588 ++ || GET_MODE (target) != tmode
1.1589 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1590 ++ target = gen_reg_rtx (tmode);
1.1591 ++ emit_move_insn(target, op0);
1.1592 ++ return target;
1.1593 ++ }
1.1594 ++ else
1.1595 ++ op0 = copy_to_mode_reg (mode0, op0);
1.1596 ++ }
1.1597 ++
1.1598 ++ if (target == 0
1.1599 ++ || GET_MODE (target) != tmode
1.1600 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1601 ++ {
1.1602 ++ target = gen_reg_rtx (tmode);
1.1603 ++ }
1.1604 ++
1.1605 ++
1.1606 ++ pat = GEN_FCN (icode) (target, op0);
1.1607 ++ if (!pat)
1.1608 ++ return 0;
1.1609 ++ emit_insn (pat);
1.1610 ++
1.1611 ++ return target;
1.1612 ++ }
1.1613 ++ case AVR32_BUILTIN_BSWAP32:
1.1614 ++ {
1.1615 ++ icode = CODE_FOR_bswap_32;
1.1616 ++ arg0 = TREE_VALUE (arglist);
1.1617 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1618 ++ tmode = insn_data[icode].operand[0].mode;
1.1619 ++ mode0 = insn_data[icode].operand[1].mode;
1.1620 ++
1.1621 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1622 ++ {
1.1623 ++ if ( CONST_INT_P (op0) )
1.1624 ++ {
1.1625 ++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
1.1626 ++ ((INTVAL (op0)&0x0000ff00) << 8) |
1.1627 ++ ((INTVAL (op0)&0x00ff0000) >> 8) |
1.1628 ++ ((INTVAL (op0)&0xff000000) >> 24) );
1.1629 ++ /* Sign extend 32-bit value to host wide int */
1.1630 ++ val <<= (HOST_BITS_PER_WIDE_INT - 32);
1.1631 ++ val >>= (HOST_BITS_PER_WIDE_INT - 32);
1.1632 ++ op0 = GEN_INT(val);
1.1633 ++ if (target == 0
1.1634 ++ || GET_MODE (target) != tmode
1.1635 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1636 ++ target = gen_reg_rtx (tmode);
1.1637 ++ emit_move_insn(target, op0);
1.1638 ++ return target;
1.1639 ++ }
1.1640 ++ else
1.1641 ++ op0 = copy_to_mode_reg (mode0, op0);
1.1642 ++ }
1.1643 ++
1.1644 ++ if (target == 0
1.1645 ++ || GET_MODE (target) != tmode
1.1646 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1647 ++ target = gen_reg_rtx (tmode);
1.1648 ++
1.1649 ++
1.1650 ++ pat = GEN_FCN (icode) (target, op0);
1.1651 ++ if (!pat)
1.1652 ++ return 0;
1.1653 ++ emit_insn (pat);
1.1654 ++
1.1655 ++ return target;
1.1656 ++ }
1.1657 ++ case AVR32_BUILTIN_MVCR_W:
1.1658 ++ case AVR32_BUILTIN_MVCR_D:
1.1659 ++ {
1.1660 ++ arg0 = TREE_VALUE (arglist);
1.1661 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1662 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1663 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1664 ++
1.1665 ++ if (fcode == AVR32_BUILTIN_MVCR_W)
1.1666 ++ icode = CODE_FOR_mvcrsi;
1.1667 ++ else
1.1668 ++ icode = CODE_FOR_mvcrdi;
1.1669 ++
1.1670 ++ tmode = insn_data[icode].operand[0].mode;
1.1671 ++
1.1672 ++ if (target == 0
1.1673 ++ || GET_MODE (target) != tmode
1.1674 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1675 ++ target = gen_reg_rtx (tmode);
1.1676 ++
1.1677 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
1.1678 ++ {
1.1679 ++ error
1.1680 ++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1.1681 ++ error ("Number should be between 0 and 7.");
1.1682 ++ return NULL_RTX;
1.1683 ++ }
1.1684 ++
1.1685 ++ if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
1.1686 ++ {
1.1687 ++ error
1.1688 ++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1.1689 ++ error ("Number should be between 0 and 15.");
1.1690 ++ return NULL_RTX;
1.1691 ++ }
1.1692 ++
1.1693 ++ pat = GEN_FCN (icode) (target, op0, op1);
1.1694 ++ if (!pat)
1.1695 ++ return 0;
1.1696 ++ emit_insn (pat);
1.1697 ++
1.1698 ++ return target;
1.1699 ++ }
1.1700 ++ case AVR32_BUILTIN_MACSATHH_W:
1.1701 ++ case AVR32_BUILTIN_MACWH_D:
1.1702 ++ case AVR32_BUILTIN_MACHH_D:
1.1703 ++ {
1.1704 ++ arg0 = TREE_VALUE (arglist);
1.1705 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1706 ++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1.1707 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1708 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1709 ++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1.1710 ++
1.1711 ++ icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
1.1712 ++ (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
1.1713 ++ CODE_FOR_machh_d);
1.1714 ++
1.1715 ++ tmode = insn_data[icode].operand[0].mode;
1.1716 ++ mode0 = insn_data[icode].operand[1].mode;
1.1717 ++ mode1 = insn_data[icode].operand[2].mode;
1.1718 ++
1.1719 ++
1.1720 ++ if (!target
1.1721 ++ || GET_MODE (target) != tmode
1.1722 ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1.1723 ++ target = gen_reg_rtx (tmode);
1.1724 ++
1.1725 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
1.1726 ++ {
1.1727 ++ /* If op0 is already a reg we must cast it to the correct mode. */
1.1728 ++ if (REG_P (op0))
1.1729 ++ op0 = convert_to_mode (tmode, op0, 1);
1.1730 ++ else
1.1731 ++ op0 = copy_to_mode_reg (tmode, op0);
1.1732 ++ }
1.1733 ++
1.1734 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
1.1735 ++ {
1.1736 ++ /* If op1 is already a reg we must cast it to the correct mode. */
1.1737 ++ if (REG_P (op1))
1.1738 ++ op1 = convert_to_mode (mode0, op1, 1);
1.1739 ++ else
1.1740 ++ op1 = copy_to_mode_reg (mode0, op1);
1.1741 ++ }
1.1742 ++
1.1743 ++ if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
1.1744 ++ {
1.1745 ++ /* If op1 is already a reg we must cast it to the correct mode. */
1.1746 ++ if (REG_P (op2))
1.1747 ++ op2 = convert_to_mode (mode1, op2, 1);
1.1748 ++ else
1.1749 ++ op2 = copy_to_mode_reg (mode1, op2);
1.1750 ++ }
1.1751 ++
1.1752 ++ emit_move_insn (target, op0);
1.1753 ++
1.1754 ++ pat = GEN_FCN (icode) (target, op1, op2);
1.1755 ++ if (!pat)
1.1756 ++ return 0;
1.1757 ++ emit_insn (pat);
1.1758 ++ return target;
1.1759 ++ }
1.1760 ++ case AVR32_BUILTIN_MVRC_W:
1.1761 ++ case AVR32_BUILTIN_MVRC_D:
1.1762 ++ {
1.1763 ++ arg0 = TREE_VALUE (arglist);
1.1764 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1765 ++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1.1766 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1767 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1768 ++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1.1769 ++
1.1770 ++ if (fcode == AVR32_BUILTIN_MVRC_W)
1.1771 ++ icode = CODE_FOR_mvrcsi;
1.1772 ++ else
1.1773 ++ icode = CODE_FOR_mvrcdi;
1.1774 ++
1.1775 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1.1776 ++ {
1.1777 ++ error ("Parameter 1 is not a valid coprocessor number.");
1.1778 ++ error ("Number should be between 0 and 7.");
1.1779 ++ return NULL_RTX;
1.1780 ++ }
1.1781 ++
1.1782 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1.1783 ++ {
1.1784 ++ error ("Parameter 2 is not a valid coprocessor register number.");
1.1785 ++ error ("Number should be between 0 and 15.");
1.1786 ++ return NULL_RTX;
1.1787 ++ }
1.1788 ++
1.1789 ++ if (GET_CODE (op2) == CONST_INT
1.1790 ++ || GET_CODE (op2) == CONST
1.1791 ++ || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
1.1792 ++ {
1.1793 ++ op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
1.1794 ++ }
1.1795 ++
1.1796 ++ if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
1.1797 ++ op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
1.1798 ++
1.1799 ++
1.1800 ++ pat = GEN_FCN (icode) (op0, op1, op2);
1.1801 ++ if (!pat)
1.1802 ++ return 0;
1.1803 ++ emit_insn (pat);
1.1804 ++
1.1805 ++ return NULL_RTX;
1.1806 ++ }
1.1807 ++ case AVR32_BUILTIN_COP:
1.1808 ++ {
1.1809 ++ rtx op3, op4;
1.1810 ++ tree arg3, arg4;
1.1811 ++ icode = CODE_FOR_cop;
1.1812 ++ arg0 = TREE_VALUE (arglist);
1.1813 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1814 ++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1.1815 ++ arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1.1816 ++ arg4 =
1.1817 ++ TREE_VALUE (TREE_CHAIN
1.1818 ++ (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
1.1819 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1820 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1821 ++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1.1822 ++ op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
1.1823 ++ op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
1.1824 ++
1.1825 ++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1.1826 ++ {
1.1827 ++ error
1.1828 ++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1.1829 ++ error ("Number should be between 0 and 7.");
1.1830 ++ return NULL_RTX;
1.1831 ++ }
1.1832 ++
1.1833 ++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1.1834 ++ {
1.1835 ++ error
1.1836 ++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1.1837 ++ error ("Number should be between 0 and 15.");
1.1838 ++ return NULL_RTX;
1.1839 ++ }
1.1840 ++
1.1841 ++ if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
1.1842 ++ {
1.1843 ++ error
1.1844 ++ ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
1.1845 ++ error ("Number should be between 0 and 15.");
1.1846 ++ return NULL_RTX;
1.1847 ++ }
1.1848 ++
1.1849 ++ if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
1.1850 ++ {
1.1851 ++ error
1.1852 ++ ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
1.1853 ++ error ("Number should be between 0 and 15.");
1.1854 ++ return NULL_RTX;
1.1855 ++ }
1.1856 ++
1.1857 ++ if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
1.1858 ++ {
1.1859 ++ error
1.1860 ++ ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
1.1861 ++ error ("Number should be between 0 and 127.");
1.1862 ++ return NULL_RTX;
1.1863 ++ }
1.1864 ++
1.1865 ++ pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
1.1866 ++ if (!pat)
1.1867 ++ return 0;
1.1868 ++ emit_insn (pat);
1.1869 ++
1.1870 ++ return target;
1.1871 ++ }
1.1872 ++ case AVR32_BUILTIN_MEMS:
1.1873 ++ case AVR32_BUILTIN_MEMC:
1.1874 ++ case AVR32_BUILTIN_MEMT:
1.1875 ++ {
1.1876 ++ if (!TARGET_RMW)
1.1877 ++ error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
1.1878 ++
1.1879 ++ switch (fcode) {
1.1880 ++ case AVR32_BUILTIN_MEMS:
1.1881 ++ icode = CODE_FOR_iorsi3;
1.1882 ++ break;
1.1883 ++ case AVR32_BUILTIN_MEMC:
1.1884 ++ icode = CODE_FOR_andsi3;
1.1885 ++ break;
1.1886 ++ case AVR32_BUILTIN_MEMT:
1.1887 ++ icode = CODE_FOR_xorsi3;
1.1888 ++ break;
1.1889 ++ }
1.1890 ++
1.1891 ++ arg0 = TREE_VALUE (arglist);
1.1892 ++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1.1893 ++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1.1894 ++ if ( GET_CODE (op0) == SYMBOL_REF )
1.1895 ++ // This symbol must be RMW addressable
1.1896 ++ SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
1.1897 ++ op0 = gen_rtx_MEM(SImode, op0);
1.1898 ++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1.1899 ++ mode0 = insn_data[icode].operand[1].mode;
1.1900 ++
1.1901 ++
1.1902 ++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1.1903 ++ {
1.1904 ++ error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
1.1905 ++ }
1.1906 ++
1.1907 ++ if ( !CONST_INT_P (op1)
1.1908 ++ || INTVAL (op1) > 31
1.1909 ++ || INTVAL (op1) < 0 )
1.1910 ++ error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
1.1911 ++
1.1912 ++ if ( fcode == AVR32_BUILTIN_MEMC )
1.1913 ++ op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
1.1914 ++ else
1.1915 ++ op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
1.1916 ++ pat = GEN_FCN (icode) (op0, op0, op1);
1.1917 ++ if (!pat)
1.1918 ++ return 0;
1.1919 ++ emit_insn (pat);
1.1920 ++ return op0;
1.1921 ++ }
1.1922 ++
1.1923 ++ }
1.1924 ++
1.1925 ++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1.1926 ++ if (d->code == fcode)
1.1927 ++ return avr32_expand_binop_builtin (d->icode, arglist, target);
1.1928 ++
1.1929 ++
1.1930 ++ /* @@@ Should really do something sensible here. */
1.1931 ++ return NULL_RTX;
1.1932 ++}
1.1933 ++
1.1934 ++
1.1935 ++/* Handle an "interrupt" or "isr" attribute;
1.1936 ++ arguments as in struct attribute_spec.handler. */
1.1937 ++
1.1938 ++static tree
1.1939 ++avr32_handle_isr_attribute (tree * node, tree name, tree args,
1.1940 ++ int flags, bool * no_add_attrs)
1.1941 ++{
1.1942 ++ if (DECL_P (*node))
1.1943 ++ {
1.1944 ++ if (TREE_CODE (*node) != FUNCTION_DECL)
1.1945 ++ {
1.1946 ++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1.1947 ++ IDENTIFIER_POINTER (name));
1.1948 ++ *no_add_attrs = true;
1.1949 ++ }
1.1950 ++ /* FIXME: the argument if any is checked for type attributes; should it
1.1951 ++ be checked for decl ones? */
1.1952 ++ }
1.1953 ++ else
1.1954 ++ {
1.1955 ++ if (TREE_CODE (*node) == FUNCTION_TYPE
1.1956 ++ || TREE_CODE (*node) == METHOD_TYPE)
1.1957 ++ {
1.1958 ++ if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
1.1959 ++ {
1.1960 ++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1.1961 ++ *no_add_attrs = true;
1.1962 ++ }
1.1963 ++ }
1.1964 ++ else if (TREE_CODE (*node) == POINTER_TYPE
1.1965 ++ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
1.1966 ++ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
1.1967 ++ && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
1.1968 ++ {
1.1969 ++ *node = build_variant_type_copy (*node);
1.1970 ++ TREE_TYPE (*node) = build_type_attribute_variant
1.1971 ++ (TREE_TYPE (*node),
1.1972 ++ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
1.1973 ++ *no_add_attrs = true;
1.1974 ++ }
1.1975 ++ else
1.1976 ++ {
1.1977 ++ /* Possibly pass this attribute on from the type to a decl. */
1.1978 ++ if (flags & ((int) ATTR_FLAG_DECL_NEXT
1.1979 ++ | (int) ATTR_FLAG_FUNCTION_NEXT
1.1980 ++ | (int) ATTR_FLAG_ARRAY_NEXT))
1.1981 ++ {
1.1982 ++ *no_add_attrs = true;
1.1983 ++ return tree_cons (name, args, NULL_TREE);
1.1984 ++ }
1.1985 ++ else
1.1986 ++ {
1.1987 ++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1.1988 ++ }
1.1989 ++ }
1.1990 ++ }
1.1991 ++
1.1992 ++ return NULL_TREE;
1.1993 ++}
1.1994 ++
1.1995 ++/* Handle an attribute requiring a FUNCTION_DECL;
1.1996 ++ arguments as in struct attribute_spec.handler. */
1.1997 ++static tree
1.1998 ++avr32_handle_fndecl_attribute (tree * node, tree name,
1.1999 ++ tree args ATTRIBUTE_UNUSED,
1.2000 ++ int flags ATTRIBUTE_UNUSED,
1.2001 ++ bool * no_add_attrs)
1.2002 ++{
1.2003 ++ if (TREE_CODE (*node) != FUNCTION_DECL)
1.2004 ++ {
1.2005 ++ warning (OPT_Wattributes,"%qs attribute only applies to functions",
1.2006 ++ IDENTIFIER_POINTER (name));
1.2007 ++ *no_add_attrs = true;
1.2008 ++ }
1.2009 ++
1.2010 ++ return NULL_TREE;
1.2011 ++}
1.2012 ++
1.2013 ++
1.2014 ++/* Handle an acall attribute;
1.2015 ++ arguments as in struct attribute_spec.handler. */
1.2016 ++
1.2017 ++static tree
1.2018 ++avr32_handle_acall_attribute (tree * node, tree name,
1.2019 ++ tree args ATTRIBUTE_UNUSED,
1.2020 ++ int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
1.2021 ++{
1.2022 ++ if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
1.2023 ++ {
1.2024 ++ warning (OPT_Wattributes,"`%s' attribute not yet supported...",
1.2025 ++ IDENTIFIER_POINTER (name));
1.2026 ++ *no_add_attrs = true;
1.2027 ++ return NULL_TREE;
1.2028 ++ }
1.2029 ++
1.2030 ++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1.2031 ++ IDENTIFIER_POINTER (name));
1.2032 ++ *no_add_attrs = true;
1.2033 ++ return NULL_TREE;
1.2034 ++}
1.2035 ++
1.2036 ++
1.2037 ++/* Return 0 if the attributes for two types are incompatible, 1 if they
1.2038 ++ are compatible, and 2 if they are nearly compatible (which causes a
1.2039 ++ warning to be generated). */
1.2040 ++
1.2041 ++static int
1.2042 ++avr32_comp_type_attributes (tree type1, tree type2)
1.2043 ++{
1.2044 ++ int acall1, acall2, isr1, isr2, naked1, naked2;
1.2045 ++
1.2046 ++ /* Check for mismatch of non-default calling convention. */
1.2047 ++ if (TREE_CODE (type1) != FUNCTION_TYPE)
1.2048 ++ return 1;
1.2049 ++
1.2050 ++ /* Check for mismatched call attributes. */
1.2051 ++ acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
1.2052 ++ acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
1.2053 ++ naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
1.2054 ++ naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
1.2055 ++ isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
1.2056 ++ if (!isr1)
1.2057 ++ isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
1.2058 ++
1.2059 ++ isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
1.2060 ++ if (!isr2)
1.2061 ++ isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
1.2062 ++
1.2063 ++ if ((acall1 && isr2)
1.2064 ++ || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
1.2065 ++ return 0;
1.2066 ++
1.2067 ++ return 1;
1.2068 ++}
1.2069 ++
1.2070 ++
1.2071 ++/* Computes the type of the current function. */
1.2072 ++
1.2073 ++static unsigned long
1.2074 ++avr32_compute_func_type (void)
1.2075 ++{
1.2076 ++ unsigned long type = AVR32_FT_UNKNOWN;
1.2077 ++ tree a;
1.2078 ++ tree attr;
1.2079 ++
1.2080 ++ if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1.2081 ++ abort ();
1.2082 ++
1.2083 ++ /* Decide if the current function is volatile. Such functions never
1.2084 ++ return, and many memory cycles can be saved by not storing register
1.2085 ++ values that will never be needed again. This optimization was added to
1.2086 ++ speed up context switching in a kernel application. */
1.2087 ++ if (optimize > 0
1.2088 ++ && TREE_NOTHROW (current_function_decl)
1.2089 ++ && TREE_THIS_VOLATILE (current_function_decl))
1.2090 ++ type |= AVR32_FT_VOLATILE;
1.2091 ++
1.2092 ++ if (cfun->static_chain_decl != NULL)
1.2093 ++ type |= AVR32_FT_NESTED;
1.2094 ++
1.2095 ++ attr = DECL_ATTRIBUTES (current_function_decl);
1.2096 ++
1.2097 ++ a = lookup_attribute ("isr", attr);
1.2098 ++ if (a == NULL_TREE)
1.2099 ++ a = lookup_attribute ("interrupt", attr);
1.2100 ++
1.2101 ++ if (a == NULL_TREE)
1.2102 ++ type |= AVR32_FT_NORMAL;
1.2103 ++ else
1.2104 ++ type |= avr32_isr_value (TREE_VALUE (a));
1.2105 ++
1.2106 ++
1.2107 ++ a = lookup_attribute ("acall", attr);
1.2108 ++ if (a != NULL_TREE)
1.2109 ++ type |= AVR32_FT_ACALL;
1.2110 ++
1.2111 ++ a = lookup_attribute ("naked", attr);
1.2112 ++ if (a != NULL_TREE)
1.2113 ++ type |= AVR32_FT_NAKED;
1.2114 ++
1.2115 ++ return type;
1.2116 ++}
1.2117 ++
1.2118 ++/* Returns the type of the current function. */
1.2119 ++
1.2120 ++static unsigned long
1.2121 ++avr32_current_func_type (void)
1.2122 ++{
1.2123 ++ if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
1.2124 ++ cfun->machine->func_type = avr32_compute_func_type ();
1.2125 ++
1.2126 ++ return cfun->machine->func_type;
1.2127 ++}
1.2128 ++
1.2129 ++/*
1.2130 ++ This target hook should return true if we should not pass type solely
1.2131 ++ in registers. The file expr.h defines a definition that is usually appropriate,
1.2132 ++ refer to expr.h for additional documentation.
1.2133 ++*/
1.2134 ++bool
1.2135 ++avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
1.2136 ++{
1.2137 ++ if (type && AGGREGATE_TYPE_P (type)
1.2138 ++ /* If the alignment is less than the size then pass in the struct on
1.2139 ++ the stack. */
1.2140 ++ && ((unsigned int) TYPE_ALIGN_UNIT (type) <
1.2141 ++ (unsigned int) int_size_in_bytes (type))
1.2142 ++ /* If we support unaligned word accesses then structs of size 4 and 8
1.2143 ++ can have any alignment and still be passed in registers. */
1.2144 ++ && !(TARGET_UNALIGNED_WORD
1.2145 ++ && (int_size_in_bytes (type) == 4
1.2146 ++ || int_size_in_bytes (type) == 8))
1.2147 ++ /* Double word structs need only a word alignment. */
1.2148 ++ && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
1.2149 ++ return true;
1.2150 ++
1.2151 ++ if (type && AGGREGATE_TYPE_P (type)
1.2152 ++ /* Structs of size 3,5,6,7 are always passed in registers. */
1.2153 ++ && (int_size_in_bytes (type) == 3
1.2154 ++ || int_size_in_bytes (type) == 5
1.2155 ++ || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
1.2156 ++ return true;
1.2157 ++
1.2158 ++
1.2159 ++ return (type && TREE_ADDRESSABLE (type));
1.2160 ++}
1.2161 ++
1.2162 ++
1.2163 ++bool
1.2164 ++avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1.2165 ++{
1.2166 ++ return true;
1.2167 ++}
1.2168 ++
1.2169 ++/*
1.2170 ++ This target hook should return true if an argument at the position indicated
1.2171 ++ by cum should be passed by reference. This predicate is queried after target
1.2172 ++ independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
1.2173 ++
1.2174 ++ If the hook returns true, a copy of that argument is made in memory and a
1.2175 ++ pointer to the argument is passed instead of the argument itself. The pointer
1.2176 ++ is passed in whatever way is appropriate for passing a pointer to that type.
1.2177 ++*/
1.2178 ++bool
1.2179 ++avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
1.2180 ++ enum machine_mode mode ATTRIBUTE_UNUSED,
1.2181 ++ tree type, bool named ATTRIBUTE_UNUSED)
1.2182 ++{
1.2183 ++ return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
1.2184 ++}
1.2185 ++
1.2186 ++static int
1.2187 ++avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
1.2188 ++ enum machine_mode mode ATTRIBUTE_UNUSED,
1.2189 ++ tree type ATTRIBUTE_UNUSED,
1.2190 ++ bool named ATTRIBUTE_UNUSED)
1.2191 ++{
1.2192 ++ return 0;
1.2193 ++}
1.2194 ++
1.2195 ++
1.2196 ++struct gcc_target targetm = TARGET_INITIALIZER;
1.2197 ++
1.2198 ++/*
1.2199 ++ Table used to convert from register number in the assembler instructions and
1.2200 ++ the register numbers used in gcc.
1.2201 ++*/
1.2202 ++const int avr32_function_arg_reglist[] = {
1.2203 ++ INTERNAL_REGNUM (12),
1.2204 ++ INTERNAL_REGNUM (11),
1.2205 ++ INTERNAL_REGNUM (10),
1.2206 ++ INTERNAL_REGNUM (9),
1.2207 ++ INTERNAL_REGNUM (8)
1.2208 ++};
1.2209 ++
1.2210 ++rtx avr32_compare_op0 = NULL_RTX;
1.2211 ++rtx avr32_compare_op1 = NULL_RTX;
1.2212 ++rtx avr32_compare_operator = NULL_RTX;
1.2213 ++rtx avr32_acc_cache = NULL_RTX;
1.2214 ++
1.2215 ++/*
1.2216 ++ Returns nonzero if it is allowed to store a value of mode mode in hard
1.2217 ++ register number regno.
1.2218 ++*/
1.2219 ++int
1.2220 ++avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
1.2221 ++{
1.2222 ++ /* We allow only float modes in the fp-registers */
1.2223 ++ if (regnr >= FIRST_FP_REGNUM
1.2224 ++ && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
1.2225 ++ {
1.2226 ++ return 0;
1.2227 ++ }
1.2228 ++
1.2229 ++ switch (mode)
1.2230 ++ {
1.2231 ++ case DImode: /* long long */
1.2232 ++ case DFmode: /* double */
1.2233 ++ case SCmode: /* __complex__ float */
1.2234 ++ case CSImode: /* __complex__ int */
1.2235 ++ if (regnr < 4)
1.2236 ++ { /* long long int not supported in r12, sp, lr
1.2237 ++ or pc. */
1.2238 ++ return 0;
1.2239 ++ }
1.2240 ++ else
1.2241 ++ {
1.2242 ++	if (regnr % 2)	/* long long int has to be referred to in even
1.2243 ++ registers. */
1.2244 ++ return 0;
1.2245 ++ else
1.2246 ++ return 1;
1.2247 ++ }
1.2248 ++ case CDImode: /* __complex__ long long */
1.2249 ++ case DCmode: /* __complex__ double */
1.2250 ++ case TImode: /* 16 bytes */
1.2251 ++ if (regnr < 7)
1.2252 ++ return 0;
1.2253 ++ else if (regnr % 2)
1.2254 ++ return 0;
1.2255 ++ else
1.2256 ++ return 1;
1.2257 ++ default:
1.2258 ++ return 1;
1.2259 ++ }
1.2260 ++}
1.2261 ++
1.2262 ++
1.2263 ++int
1.2264 ++avr32_rnd_operands (rtx add, rtx shift)
1.2265 ++{
1.2266 ++ if (GET_CODE (shift) == CONST_INT &&
1.2267 ++ GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
1.2268 ++ {
1.2269 ++ if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
1.2270 ++ return TRUE;
1.2271 ++ }
1.2272 ++
1.2273 ++ return FALSE;
1.2274 ++}
1.2275 ++
1.2276 ++
1.2277 ++
1.2278 ++int
1.2279 ++avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
1.2280 ++{
1.2281 ++ switch (c)
1.2282 ++ {
1.2283 ++ case 'K':
1.2284 ++ case 'I':
1.2285 ++ {
1.2286 ++ HOST_WIDE_INT min_value = 0, max_value = 0;
1.2287 ++ char size_str[3];
1.2288 ++ int const_size;
1.2289 ++
1.2290 ++ size_str[0] = str[2];
1.2291 ++ size_str[1] = str[3];
1.2292 ++ size_str[2] = '\0';
1.2293 ++ const_size = atoi (size_str);
1.2294 ++
1.2295 ++ if (toupper (str[1]) == 'U')
1.2296 ++ {
1.2297 ++ min_value = 0;
1.2298 ++ max_value = (1 << const_size) - 1;
1.2299 ++ }
1.2300 ++ else if (toupper (str[1]) == 'S')
1.2301 ++ {
1.2302 ++ min_value = -(1 << (const_size - 1));
1.2303 ++ max_value = (1 << (const_size - 1)) - 1;
1.2304 ++ }
1.2305 ++
1.2306 ++ if (c == 'I')
1.2307 ++ {
1.2308 ++ value = -value;
1.2309 ++ }
1.2310 ++
1.2311 ++ if (value >= min_value && value <= max_value)
1.2312 ++ {
1.2313 ++ return 1;
1.2314 ++ }
1.2315 ++ break;
1.2316 ++ }
1.2317 ++ case 'M':
1.2318 ++ return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
1.2319 ++ case 'J':
1.2320 ++ return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
1.2321 ++ case 'O':
1.2322 ++ return one_bit_set_operand (GEN_INT (value), VOIDmode);
1.2323 ++ case 'N':
1.2324 ++ return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
1.2325 ++ case 'L':
1.2326 ++ /* The lower 16-bits are set. */
1.2327 ++ return ((value & 0xffff) == 0xffff) ;
1.2328 ++ }
1.2329 ++
1.2330 ++ return 0;
1.2331 ++}
1.2332 ++
1.2333 ++
1.2334 ++/*Compute mask of which floating-point registers needs saving upon
1.2335 ++ entry to this function*/
1.2336 ++static unsigned long
1.2337 ++avr32_compute_save_fp_reg_mask (void)
1.2338 ++{
1.2339 ++ unsigned long func_type = avr32_current_func_type ();
1.2340 ++ unsigned int save_reg_mask = 0;
1.2341 ++ unsigned int reg;
1.2342 ++ unsigned int max_reg = 7;
1.2343 ++ int save_all_call_used_regs = FALSE;
1.2344 ++
1.2345 ++ /* This only applies for hardware floating-point implementation. */
1.2346 ++ if (!TARGET_HARD_FLOAT)
1.2347 ++ return 0;
1.2348 ++
1.2349 ++ if (IS_INTERRUPT (func_type))
1.2350 ++ {
1.2351 ++
1.2352 ++ /* Interrupt functions must not corrupt any registers, even call
1.2353 ++ clobbered ones. If this is a leaf function we can just examine the
1.2354 ++ registers used by the RTL, but otherwise we have to assume that
1.2355 ++ whatever function is called might clobber anything, and so we have
1.2356 ++ to save all the call-clobbered registers as well. */
1.2357 ++ max_reg = 13;
1.2358 ++ save_all_call_used_regs = !current_function_is_leaf;
1.2359 ++ }
1.2360 ++
1.2361 ++  /* All used registers must be saved */
1.2362 ++ for (reg = 0; reg <= max_reg; reg++)
1.2363 ++ if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
1.2364 ++ || (save_all_call_used_regs
1.2365 ++ && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
1.2366 ++ save_reg_mask |= (1 << reg);
1.2367 ++
1.2368 ++ return save_reg_mask;
1.2369 ++}
1.2370 ++
1.2371 ++/*Compute mask of registers which needs saving upon function entry */
1.2372 ++static unsigned long
1.2373 ++avr32_compute_save_reg_mask (int push)
1.2374 ++{
1.2375 ++ unsigned long func_type;
1.2376 ++ unsigned int save_reg_mask = 0;
1.2377 ++ unsigned int reg;
1.2378 ++
1.2379 ++ func_type = avr32_current_func_type ();
1.2380 ++
1.2381 ++ if (IS_INTERRUPT (func_type))
1.2382 ++ {
1.2383 ++ unsigned int max_reg = 12;
1.2384 ++
1.2385 ++
1.2386 ++ /* Get the banking scheme for the interrupt */
1.2387 ++ switch (func_type)
1.2388 ++ {
1.2389 ++ case AVR32_FT_ISR_FULL:
1.2390 ++ max_reg = 0;
1.2391 ++ break;
1.2392 ++ case AVR32_FT_ISR_HALF:
1.2393 ++ max_reg = 7;
1.2394 ++ break;
1.2395 ++ case AVR32_FT_ISR_NONE:
1.2396 ++ max_reg = 12;
1.2397 ++ break;
1.2398 ++ }
1.2399 ++
1.2400 ++ /* Interrupt functions must not corrupt any registers, even call
1.2401 ++ clobbered ones. If this is a leaf function we can just examine the
1.2402 ++ registers used by the RTL, but otherwise we have to assume that
1.2403 ++ whatever function is called might clobber anything, and so we have
1.2404 ++ to save all the call-clobbered registers as well. */
1.2405 ++
1.2406 ++ /* Need not push the registers r8-r12 for AVR32A architectures, as this
1.2407 ++	 is automatically done in hardware. We also do not have any shadow
1.2408 ++ registers. */
1.2409 ++ if (TARGET_UARCH_AVR32A)
1.2410 ++ {
1.2411 ++ max_reg = 7;
1.2412 ++ func_type = AVR32_FT_ISR_NONE;
1.2413 ++ }
1.2414 ++
1.2415 ++      /* All registers which are used and are not shadowed must be saved */
1.2416 ++ for (reg = 0; reg <= max_reg; reg++)
1.2417 ++ if (regs_ever_live[INTERNAL_REGNUM (reg)]
1.2418 ++ || (!current_function_is_leaf
1.2419 ++ && call_used_regs[INTERNAL_REGNUM (reg)]))
1.2420 ++ save_reg_mask |= (1 << reg);
1.2421 ++
1.2422 ++ /* Check LR */
1.2423 ++ if ((regs_ever_live[LR_REGNUM]
1.2424 ++ || !current_function_is_leaf || frame_pointer_needed)
1.2425 ++ /* Only non-shadowed register models */
1.2426 ++ && (func_type == AVR32_FT_ISR_NONE))
1.2427 ++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
1.2428 ++
1.2429 ++ /* Make sure that the GOT register is pushed. */
1.2430 ++ if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
1.2431 ++ && current_function_uses_pic_offset_table)
1.2432 ++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
1.2433 ++
1.2434 ++ }
1.2435 ++ else
1.2436 ++ {
1.2437 ++ int use_pushm = optimize_size;
1.2438 ++
1.2439 ++ /* In the normal case we only need to save those registers which are
1.2440 ++ call saved and which are used by this function. */
1.2441 ++ for (reg = 0; reg <= 7; reg++)
1.2442 ++ if (regs_ever_live[INTERNAL_REGNUM (reg)]
1.2443 ++ && !call_used_regs[INTERNAL_REGNUM (reg)])
1.2444 ++ save_reg_mask |= (1 << reg);
1.2445 ++
1.2446 ++ /* Make sure that the GOT register is pushed. */
1.2447 ++ if (current_function_uses_pic_offset_table)
1.2448 ++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
1.2449 ++
1.2450 ++
1.2451 ++ /* If we optimize for size and do not have anonymous arguments: use
1.2452 ++ popm/pushm always */
1.2453 ++ if (use_pushm)
1.2454 ++ {
1.2455 ++ if ((save_reg_mask & (1 << 0))
1.2456 ++ || (save_reg_mask & (1 << 1))
1.2457 ++ || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
1.2458 ++ save_reg_mask |= 0xf;
1.2459 ++
1.2460 ++ if ((save_reg_mask & (1 << 4))
1.2461 ++ || (save_reg_mask & (1 << 5))
1.2462 ++ || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
1.2463 ++ save_reg_mask |= 0xf0;
1.2464 ++
1.2465 ++ if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
1.2466 ++ save_reg_mask |= 0x300;
1.2467 ++ }
1.2468 ++
1.2469 ++
1.2470 ++ /* Check LR */
1.2471 ++ if ((regs_ever_live[LR_REGNUM]
1.2472 ++ || !current_function_is_leaf
1.2473 ++ || (optimize_size
1.2474 ++ && save_reg_mask
1.2475 ++ && !current_function_calls_eh_return) || frame_pointer_needed))
1.2476 ++ {
1.2477 ++ if (push
1.2478 ++ /* Never pop LR into PC for functions which
1.2479 ++ calls __builtin_eh_return, since we need to
1.2480 ++ fix the SP after the restoring of the registers
1.2481 ++ and before returning. */
1.2482 ++ || current_function_calls_eh_return)
1.2483 ++ {
1.2484 ++ /* Push/Pop LR */
1.2485 ++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
1.2486 ++ }
1.2487 ++ else
1.2488 ++ {
1.2489 ++ /* Pop PC */
1.2490 ++ save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
1.2491 ++ }
1.2492 ++ }
1.2493 ++ }
1.2494 ++
1.2495 ++
1.2496 ++ /* Save registers so the exception handler can modify them. */
1.2497 ++ if (current_function_calls_eh_return)
1.2498 ++ {
1.2499 ++ unsigned int i;
1.2500 ++
1.2501 ++ for (i = 0;; i++)
1.2502 ++ {
1.2503 ++ reg = EH_RETURN_DATA_REGNO (i);
1.2504 ++ if (reg == INVALID_REGNUM)
1.2505 ++ break;
1.2506 ++ save_reg_mask |= 1 << ASM_REGNUM (reg);
1.2507 ++ }
1.2508 ++ }
1.2509 ++
1.2510 ++ return save_reg_mask;
1.2511 ++}
1.2512 ++
1.2513 ++/*Compute total size in bytes of all saved registers */
1.2514 ++static int
1.2515 ++avr32_get_reg_mask_size (int reg_mask)
1.2516 ++{
1.2517 ++ int reg, size;
1.2518 ++ size = 0;
1.2519 ++
1.2520 ++ for (reg = 0; reg <= 15; reg++)
1.2521 ++ if (reg_mask & (1 << reg))
1.2522 ++ size += 4;
1.2523 ++
1.2524 ++ return size;
1.2525 ++}
1.2526 ++
1.2527 ++/*Get a register from one of the registers which are saved onto the stack
1.2528 ++ upon function entry */
1.2529 ++
1.2530 ++static int
1.2531 ++avr32_get_saved_reg (int save_reg_mask)
1.2532 ++{
1.2533 ++ unsigned int reg;
1.2534 ++
1.2535 ++ /* Find the first register which is saved in the saved_reg_mask */
1.2536 ++ for (reg = 0; reg <= 15; reg++)
1.2537 ++ if (save_reg_mask & (1 << reg))
1.2538 ++ return reg;
1.2539 ++
1.2540 ++ return -1;
1.2541 ++}
1.2542 ++
1.2543 ++/* Return 1 if it is possible to return using a single instruction. */
1.2544 ++int
1.2545 ++avr32_use_return_insn (int iscond)
1.2546 ++{
1.2547 ++ unsigned int func_type = avr32_current_func_type ();
1.2548 ++ unsigned long saved_int_regs;
1.2549 ++ unsigned long saved_fp_regs;
1.2550 ++
1.2551 ++ /* Never use a return instruction before reload has run. */
1.2552 ++ if (!reload_completed)
1.2553 ++ return 0;
1.2554 ++
1.2555 ++ /* Must adjust the stack for vararg functions. */
1.2556 ++ if (current_function_args_info.uses_anonymous_args)
1.2557 ++ return 0;
1.2558 ++
1.2559 ++  /* If there is a stack adjustment. */
1.2560 ++ if (get_frame_size ())
1.2561 ++ return 0;
1.2562 ++
1.2563 ++ saved_int_regs = avr32_compute_save_reg_mask (TRUE);
1.2564 ++ saved_fp_regs = avr32_compute_save_fp_reg_mask ();
1.2565 ++
1.2566 ++ /* Functions which have saved fp-regs on the stack can not be performed in
1.2567 ++ one instruction */
1.2568 ++ if (saved_fp_regs)
1.2569 ++ return 0;
1.2570 ++
1.2571 ++ /* Conditional returns can not be performed in one instruction if we need
1.2572 ++ to restore registers from the stack */
1.2573 ++ if (iscond && saved_int_regs)
1.2574 ++ return 0;
1.2575 ++
1.2576 ++ /* Conditional return can not be used for interrupt handlers. */
1.2577 ++ if (iscond && IS_INTERRUPT (func_type))
1.2578 ++ return 0;
1.2579 ++
1.2580 ++ /* For interrupt handlers which needs to pop registers */
1.2581 ++ if (saved_int_regs && IS_INTERRUPT (func_type))
1.2582 ++ return 0;
1.2583 ++
1.2584 ++
1.2585 ++ /* If there are saved registers but the LR isn't saved, then we need two
1.2586 ++ instructions for the return. */
1.2587 ++ if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
1.2588 ++ return 0;
1.2589 ++
1.2590 ++
1.2591 ++ return 1;
1.2592 ++}
1.2593 ++
1.2594 ++
1.2595 ++/*Generate some function prologue info in the assembly file*/
1.2596 ++
1.2597 ++void
1.2598 ++avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
1.2599 ++{
1.2600 ++ if (IS_NAKED (avr32_current_func_type ()))
1.2601 ++ fprintf (f,
1.2602 ++ "\t# Function is naked: Prologue and epilogue provided by programmer\n");
1.2603 ++
1.2604 ++ if (IS_INTERRUPT (avr32_current_func_type ()))
1.2605 ++ {
1.2606 ++ switch (avr32_current_func_type ())
1.2607 ++ {
1.2608 ++ case AVR32_FT_ISR_FULL:
1.2609 ++ fprintf (f,
1.2610 ++ "\t# Interrupt Function: Fully shadowed register file\n");
1.2611 ++ break;
1.2612 ++ case AVR32_FT_ISR_HALF:
1.2613 ++ fprintf (f,
1.2614 ++ "\t# Interrupt Function: Half shadowed register file\n");
1.2615 ++ break;
1.2616 ++ default:
1.2617 ++ case AVR32_FT_ISR_NONE:
1.2618 ++ fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
1.2619 ++ break;
1.2620 ++ }
1.2621 ++ }
1.2622 ++
1.2623 ++
1.2624 ++ fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
1.2625 ++ current_function_args_size, frame_size,
1.2626 ++ current_function_pretend_args_size);
1.2627 ++
1.2628 ++ fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
1.2629 ++ frame_pointer_needed, current_function_is_leaf);
1.2630 ++
1.2631 ++ fprintf (f, "\t# uses_anonymous_args = %i\n",
1.2632 ++ current_function_args_info.uses_anonymous_args);
1.2633 ++ if (current_function_calls_eh_return)
1.2634 ++ fprintf (f, "\t# Calls __builtin_eh_return.\n");
1.2635 ++
1.2636 ++}
1.2637 ++
1.2638 ++
1.2639 ++/* Generate and emit an insn that we will recognize as a pushm or stm.
1.2640 ++ Unfortunately, since this insn does not reflect very well the actual
1.2641 ++ semantics of the operation, we need to annotate the insn for the benefit
1.2642 ++ of DWARF2 frame unwind information. */
1.2643 ++
1.2644 ++int avr32_convert_to_reglist16 (int reglist8_vect);
1.2645 ++
1.2646 ++static rtx
1.2647 ++emit_multi_reg_push (int reglist, int usePUSHM)
1.2648 ++{
1.2649 ++ rtx insn;
1.2650 ++ rtx dwarf;
1.2651 ++ rtx tmp;
1.2652 ++ rtx reg;
1.2653 ++ int i;
1.2654 ++ int nr_regs;
1.2655 ++ int index = 0;
1.2656 ++
1.2657 ++ if (usePUSHM)
1.2658 ++ {
1.2659 ++ insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
1.2660 ++ reglist = avr32_convert_to_reglist16 (reglist);
1.2661 ++ }
1.2662 ++ else
1.2663 ++ {
1.2664 ++ insn = emit_insn (gen_stm (stack_pointer_rtx,
1.2665 ++ gen_rtx_CONST_INT (SImode, reglist),
1.2666 ++ gen_rtx_CONST_INT (SImode, 1)));
1.2667 ++ }
1.2668 ++
1.2669 ++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
1.2670 ++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
1.2671 ++
1.2672 ++ for (i = 15; i >= 0; i--)
1.2673 ++ {
1.2674 ++ if (reglist & (1 << i))
1.2675 ++ {
1.2676 ++ reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
1.2677 ++ tmp = gen_rtx_SET (VOIDmode,
1.2678 ++ gen_rtx_MEM (SImode,
1.2679 ++ plus_constant (stack_pointer_rtx,
1.2680 ++ 4 * index)), reg);
1.2681 ++ RTX_FRAME_RELATED_P (tmp) = 1;
1.2682 ++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
1.2683 ++ }
1.2684 ++ }
1.2685 ++
1.2686 ++ tmp = gen_rtx_SET (SImode,
1.2687 ++ stack_pointer_rtx,
1.2688 ++ gen_rtx_PLUS (SImode,
1.2689 ++ stack_pointer_rtx,
1.2690 ++ GEN_INT (-4 * nr_regs)));
1.2691 ++ RTX_FRAME_RELATED_P (tmp) = 1;
1.2692 ++ XVECEXP (dwarf, 0, 0) = tmp;
1.2693 ++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
1.2694 ++ REG_NOTES (insn));
1.2695 ++ return insn;
1.2696 ++}
1.2697 ++
1.2698 ++
1.2699 ++static rtx
1.2700 ++emit_multi_fp_reg_push (int reglist)
1.2701 ++{
1.2702 ++ rtx insn;
1.2703 ++ rtx dwarf;
1.2704 ++ rtx tmp;
1.2705 ++ rtx reg;
1.2706 ++ int i;
1.2707 ++ int nr_regs;
1.2708 ++ int index = 0;
1.2709 ++
1.2710 ++ insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
1.2711 ++ gen_rtx_CONST_INT (SImode, reglist),
1.2712 ++ gen_rtx_CONST_INT (SImode, 1)));
1.2713 ++
1.2714 ++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
1.2715 ++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
1.2716 ++
1.2717 ++ for (i = 15; i >= 0; i--)
1.2718 ++ {
1.2719 ++ if (reglist & (1 << i))
1.2720 ++ {
1.2721 ++ reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
1.2722 ++ tmp = gen_rtx_SET (VOIDmode,
1.2723 ++ gen_rtx_MEM (SImode,
1.2724 ++ plus_constant (stack_pointer_rtx,
1.2725 ++ 4 * index)), reg);
1.2726 ++ RTX_FRAME_RELATED_P (tmp) = 1;
1.2727 ++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
1.2728 ++ }
1.2729 ++ }
1.2730 ++
1.2731 ++ tmp = gen_rtx_SET (SImode,
1.2732 ++ stack_pointer_rtx,
1.2733 ++ gen_rtx_PLUS (SImode,
1.2734 ++ stack_pointer_rtx,
1.2735 ++ GEN_INT (-4 * nr_regs)));
1.2736 ++ RTX_FRAME_RELATED_P (tmp) = 1;
1.2737 ++ XVECEXP (dwarf, 0, 0) = tmp;
1.2738 ++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
1.2739 ++ REG_NOTES (insn));
1.2740 ++ return insn;
1.2741 ++}
1.2742 ++
1.2743 ++rtx
1.2744 ++avr32_gen_load_multiple (rtx * regs, int count, rtx from,
1.2745 ++ int write_back, int in_struct_p, int scalar_p)
1.2746 ++{
1.2747 ++
1.2748 ++ rtx result;
1.2749 ++ int i = 0, j;
1.2750 ++
1.2751 ++ result =
1.2752 ++ gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
1.2753 ++
1.2754 ++ if (write_back)
1.2755 ++ {
1.2756 ++ XVECEXP (result, 0, 0)
1.2757 ++ = gen_rtx_SET (GET_MODE (from), from,
1.2758 ++ plus_constant (from, count * 4));
1.2759 ++ i = 1;
1.2760 ++ count++;
1.2761 ++ }
1.2762 ++
1.2763 ++
1.2764 ++ for (j = 0; i < count; i++, j++)
1.2765 ++ {
1.2766 ++ rtx unspec;
1.2767 ++ rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
1.2768 ++ MEM_IN_STRUCT_P (mem) = in_struct_p;
1.2769 ++ MEM_SCALAR_P (mem) = scalar_p;
1.2770 ++ unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
1.2771 ++ XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
1.2772 ++ }
1.2773 ++
1.2774 ++ return result;
1.2775 ++}
1.2776 ++
1.2777 ++
1.2778 ++rtx
1.2779 ++avr32_gen_store_multiple (rtx * regs, int count, rtx to,
1.2780 ++ int in_struct_p, int scalar_p)
1.2781 ++{
1.2782 ++ rtx result;
1.2783 ++ int i = 0, j;
1.2784 ++
1.2785 ++ result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
1.2786 ++
1.2787 ++ for (j = 0; i < count; i++, j++)
1.2788 ++ {
1.2789 ++ rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
1.2790 ++ MEM_IN_STRUCT_P (mem) = in_struct_p;
1.2791 ++ MEM_SCALAR_P (mem) = scalar_p;
1.2792 ++ XVECEXP (result, 0, i)
1.2793 ++ = gen_rtx_SET (VOIDmode, mem,
1.2794 ++ gen_rtx_UNSPEC (VOIDmode,
1.2795 ++ gen_rtvec (1, regs[j]),
1.2796 ++ UNSPEC_STORE_MULTIPLE));
1.2797 ++ }
1.2798 ++
1.2799 ++ return result;
1.2800 ++}
1.2801 ++
1.2802 ++
1.2803 ++/* Move a block of memory if it is word aligned or we support unaligned
1.2804 ++ word memory accesses. The size must be maximum 64 bytes. */
1.2805 ++
1.2806 ++int
1.2807 ++avr32_gen_movmemsi (rtx * operands)
1.2808 ++{
1.2809 ++ HOST_WIDE_INT bytes_to_go;
1.2810 ++ rtx src, dst;
1.2811 ++ rtx st_src, st_dst;
1.2812 ++ int src_offset = 0, dst_offset = 0;
1.2813 ++ int block_size;
1.2814 ++ int dst_in_struct_p, src_in_struct_p;
1.2815 ++ int dst_scalar_p, src_scalar_p;
1.2816 ++ int unaligned;
1.2817 ++
1.2818 ++ if (GET_CODE (operands[2]) != CONST_INT
1.2819 ++ || GET_CODE (operands[3]) != CONST_INT
1.2820 ++ || INTVAL (operands[2]) > 64
1.2821 ++ || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
1.2822 ++ return 0;
1.2823 ++
1.2824 ++ unaligned = (INTVAL (operands[3]) & 3) != 0;
1.2825 ++
1.2826 ++ block_size = 4;
1.2827 ++
1.2828 ++ st_dst = XEXP (operands[0], 0);
1.2829 ++ st_src = XEXP (operands[1], 0);
1.2830 ++
1.2831 ++ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
1.2832 ++ dst_scalar_p = MEM_SCALAR_P (operands[0]);
1.2833 ++ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
1.2834 ++ src_scalar_p = MEM_SCALAR_P (operands[1]);
1.2835 ++
1.2836 ++ dst = copy_to_mode_reg (SImode, st_dst);
1.2837 ++ src = copy_to_mode_reg (SImode, st_src);
1.2838 ++
1.2839 ++ bytes_to_go = INTVAL (operands[2]);
1.2840 ++
1.2841 ++ while (bytes_to_go)
1.2842 ++ {
1.2843 ++ enum machine_mode move_mode;
1.2844 ++ /* (Seems to be a problem with reloads for the movti pattern so this is
1.2845 ++ disabled until that problem is resolved)
1.2846 ++ UPDATE: Problem seems to be solved now.... */
1.2847 ++ if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
1.2848 ++ /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
1.2849 ++ && !TARGET_ARCH_UC)
1.2850 ++ move_mode = TImode;
1.2851 ++ else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
1.2852 ++ move_mode = DImode;
1.2853 ++ else if (bytes_to_go >= GET_MODE_SIZE (SImode))
1.2854 ++ move_mode = SImode;
1.2855 ++ else
1.2856 ++ move_mode = QImode;
1.2857 ++
1.2858 ++ {
1.2859 ++ rtx src_mem;
1.2860 ++ rtx dst_mem = gen_rtx_MEM (move_mode,
1.2861 ++ gen_rtx_PLUS (SImode, dst,
1.2862 ++ GEN_INT (dst_offset)));
1.2863 ++ dst_offset += GET_MODE_SIZE (move_mode);
1.2864 ++	if ( 0 /* This causes an error in GCC. Think there is
1.2865 ++		  something wrong in the gcse pass which causes REG_EQUIV notes
1.2866 ++		  to be wrong so disabling it for now. */
1.2867 ++ && move_mode == TImode
1.2868 ++ && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
1.2869 ++ {
1.2870 ++ src_mem = gen_rtx_MEM (move_mode,
1.2871 ++ gen_rtx_POST_INC (SImode, src));
1.2872 ++ }
1.2873 ++ else
1.2874 ++ {
1.2875 ++ src_mem = gen_rtx_MEM (move_mode,
1.2876 ++ gen_rtx_PLUS (SImode, src,
1.2877 ++ GEN_INT (src_offset)));
1.2878 ++ src_offset += GET_MODE_SIZE (move_mode);
1.2879 ++ }
1.2880 ++
1.2881 ++ bytes_to_go -= GET_MODE_SIZE (move_mode);
1.2882 ++
1.2883 ++ MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
1.2884 ++ MEM_SCALAR_P (dst_mem) = dst_scalar_p;
1.2885 ++
1.2886 ++ MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
1.2887 ++ MEM_SCALAR_P (src_mem) = src_scalar_p;
1.2888 ++ emit_move_insn (dst_mem, src_mem);
1.2889 ++
1.2890 ++ }
1.2891 ++ }
1.2892 ++
1.2893 ++ return 1;
1.2894 ++}
1.2895 ++
1.2896 ++
1.2897 ++
1.2898 ++/*Expand the prologue instruction*/
1.2899 ++void
1.2900 ++avr32_expand_prologue (void)
1.2901 ++{
1.2902 ++ rtx insn, dwarf;
1.2903 ++ unsigned long saved_reg_mask, saved_fp_reg_mask;
1.2904 ++ int reglist8 = 0;
1.2905 ++
1.2906 ++  /* Naked functions do not have a prologue */
1.2907 ++ if (IS_NAKED (avr32_current_func_type ()))
1.2908 ++ return;
1.2909 ++
1.2910 ++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
1.2911 ++
1.2912 ++ if (saved_reg_mask)
1.2913 ++ {
1.2914 ++ /* Must push used registers */
1.2915 ++
1.2916 ++ /* Should we use POPM or LDM? */
1.2917 ++ int usePUSHM = TRUE;
1.2918 ++ reglist8 = 0;
1.2919 ++ if (((saved_reg_mask & (1 << 0)) ||
1.2920 ++ (saved_reg_mask & (1 << 1)) ||
1.2921 ++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
1.2922 ++ {
1.2923 ++ /* One of R0-R3 should at least be pushed */
1.2924 ++ if (((saved_reg_mask & (1 << 0)) &&
1.2925 ++ (saved_reg_mask & (1 << 1)) &&
1.2926 ++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
1.2927 ++ {
1.2928 ++ /* All should be pushed */
1.2929 ++ reglist8 |= 0x01;
1.2930 ++ }
1.2931 ++ else
1.2932 ++ {
1.2933 ++ usePUSHM = FALSE;
1.2934 ++ }
1.2935 ++ }
1.2936 ++
1.2937 ++ if (((saved_reg_mask & (1 << 4)) ||
1.2938 ++ (saved_reg_mask & (1 << 5)) ||
1.2939 ++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
1.2940 ++ {
1.2941 ++ /* One of R4-R7 should at least be pushed */
1.2942 ++ if (((saved_reg_mask & (1 << 4)) &&
1.2943 ++ (saved_reg_mask & (1 << 5)) &&
1.2944 ++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
1.2945 ++ {
1.2946 ++ if (usePUSHM)
1.2947 ++ /* All should be pushed */
1.2948 ++ reglist8 |= 0x02;
1.2949 ++ }
1.2950 ++ else
1.2951 ++ {
1.2952 ++ usePUSHM = FALSE;
1.2953 ++ }
1.2954 ++ }
1.2955 ++
1.2956 ++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
1.2957 ++ {
1.2958 ++ /* One of R8-R9 should at least be pushed */
1.2959 ++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
1.2960 ++ {
1.2961 ++ if (usePUSHM)
1.2962 ++ /* All should be pushed */
1.2963 ++ reglist8 |= 0x04;
1.2964 ++ }
1.2965 ++ else
1.2966 ++ {
1.2967 ++ usePUSHM = FALSE;
1.2968 ++ }
1.2969 ++ }
1.2970 ++
1.2971 ++ if (saved_reg_mask & (1 << 10))
1.2972 ++ reglist8 |= 0x08;
1.2973 ++
1.2974 ++ if (saved_reg_mask & (1 << 11))
1.2975 ++ reglist8 |= 0x10;
1.2976 ++
1.2977 ++ if (saved_reg_mask & (1 << 12))
1.2978 ++ reglist8 |= 0x20;
1.2979 ++
1.2980 ++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
1.2981 ++ {
1.2982 ++ /* Push LR */
1.2983 ++ reglist8 |= 0x40;
1.2984 ++ }
1.2985 ++
1.2986 ++ if (usePUSHM)
1.2987 ++ {
1.2988 ++ insn = emit_multi_reg_push (reglist8, TRUE);
1.2989 ++ }
1.2990 ++ else
1.2991 ++ {
1.2992 ++ insn = emit_multi_reg_push (saved_reg_mask, FALSE);
1.2993 ++ }
1.2994 ++ RTX_FRAME_RELATED_P (insn) = 1;
1.2995 ++
1.2996 ++ /* Prevent this instruction from being scheduled after any other
1.2997 ++ instructions. */
1.2998 ++ emit_insn (gen_blockage ());
1.2999 ++ }
1.3000 ++
1.3001 ++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
1.3002 ++ if (saved_fp_reg_mask)
1.3003 ++ {
1.3004 ++ insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
1.3005 ++ RTX_FRAME_RELATED_P (insn) = 1;
1.3006 ++
1.3007 ++ /* Prevent this instruction from being scheduled after any other
1.3008 ++ instructions. */
1.3009 ++ emit_insn (gen_blockage ());
1.3010 ++ }
1.3011 ++
1.3012 ++ /* Set frame pointer */
1.3013 ++ if (frame_pointer_needed)
1.3014 ++ {
1.3015 ++ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
1.3016 ++ RTX_FRAME_RELATED_P (insn) = 1;
1.3017 ++ }
1.3018 ++
1.3019 ++ if (get_frame_size () > 0)
1.3020 ++ {
1.3021 ++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
1.3022 ++ {
1.3023 ++ insn = emit_insn (gen_rtx_SET (SImode,
1.3024 ++ stack_pointer_rtx,
1.3025 ++ gen_rtx_PLUS (SImode,
1.3026 ++ stack_pointer_rtx,
1.3027 ++ gen_rtx_CONST_INT
1.3028 ++ (SImode,
1.3029 ++ -get_frame_size
1.3030 ++ ()))));
1.3031 ++ RTX_FRAME_RELATED_P (insn) = 1;
1.3032 ++ }
1.3033 ++ else
1.3034 ++ {
1.3035 ++	  /* Immediate is larger than k21. We must either check if we can
1.3036 ++	     use one of the pushed registers as temporary storage, or we must
1.3037 ++	     make a temp register by pushing a register to the stack. */
1.3038 ++ rtx temp_reg, const_pool_entry, insn;
1.3039 ++ if (saved_reg_mask)
1.3040 ++ {
1.3041 ++ temp_reg =
1.3042 ++ gen_rtx_REG (SImode,
1.3043 ++ INTERNAL_REGNUM (avr32_get_saved_reg
1.3044 ++ (saved_reg_mask)));
1.3045 ++ }
1.3046 ++ else
1.3047 ++ {
1.3048 ++ temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
1.3049 ++ emit_move_insn (gen_rtx_MEM
1.3050 ++ (SImode,
1.3051 ++ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
1.3052 ++ temp_reg);
1.3053 ++ }
1.3054 ++
1.3055 ++ const_pool_entry =
1.3056 ++ force_const_mem (SImode,
1.3057 ++ gen_rtx_CONST_INT (SImode, get_frame_size ()));
1.3058 ++ emit_move_insn (temp_reg, const_pool_entry);
1.3059 ++
1.3060 ++ insn = emit_insn (gen_rtx_SET (SImode,
1.3061 ++ stack_pointer_rtx,
1.3062 ++ gen_rtx_MINUS (SImode,
1.3063 ++ stack_pointer_rtx,
1.3064 ++ temp_reg)));
1.3065 ++
1.3066 ++ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
1.3067 ++ gen_rtx_PLUS (SImode, stack_pointer_rtx,
1.3068 ++ GEN_INT (-get_frame_size ())));
1.3069 ++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
1.3070 ++ dwarf, REG_NOTES (insn));
1.3071 ++ RTX_FRAME_RELATED_P (insn) = 1;
1.3072 ++
1.3073 ++ if (!saved_reg_mask)
1.3074 ++ {
1.3075 ++ insn =
1.3076 ++ emit_move_insn (temp_reg,
1.3077 ++ gen_rtx_MEM (SImode,
1.3078 ++ gen_rtx_POST_INC (SImode,
1.3079 ++ gen_rtx_REG
1.3080 ++ (SImode,
1.3081 ++ 13))));
1.3082 ++ }
1.3083 ++
1.3084 ++ /* Mark the temp register as dead */
1.3085 ++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
1.3086 ++ REG_NOTES (insn));
1.3087 ++
1.3088 ++
1.3089 ++ }
1.3090 ++
1.3091 ++      /* Prevent the stack adjustment from being scheduled after any
1.3092 ++         instructions using the frame pointer. */
1.3093 ++ emit_insn (gen_blockage ());
1.3094 ++ }
1.3095 ++
1.3096 ++ /* Load GOT */
1.3097 ++ if (flag_pic)
1.3098 ++ {
1.3099 ++ avr32_load_pic_register ();
1.3100 ++
1.3101 ++ /* gcc does not know that load or call instructions might use the pic
1.3102 ++ register so it might schedule these instructions before the loading
1.3103 ++ of the pic register. To avoid this emit a barrier for now. TODO!
1.3104 ++ Find out a better way to let gcc know which instructions might use
1.3105 ++ the pic register. */
1.3106 ++ emit_insn (gen_blockage ());
1.3107 ++ }
1.3108 ++ return;
1.3109 ++}
1.3110 ++
1.3111 ++void
1.3112 ++avr32_set_return_address (rtx source, rtx scratch)
1.3113 ++{
1.3114 ++ rtx addr;
1.3115 ++ unsigned long saved_regs;
1.3116 ++
1.3117 ++ saved_regs = avr32_compute_save_reg_mask (TRUE);
1.3118 ++
1.3119 ++ if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
1.3120 ++ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
1.3121 ++ else
1.3122 ++ {
1.3123 ++ if (frame_pointer_needed)
1.3124 ++ addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
1.3125 ++ else
1.3126 ++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
1.3127 ++ {
1.3128 ++ addr = plus_constant (stack_pointer_rtx, get_frame_size ());
1.3129 ++ }
1.3130 ++ else
1.3131 ++ {
1.3132 ++ emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
1.3133 ++ addr = scratch;
1.3134 ++ }
1.3135 ++ emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
1.3136 ++ }
1.3137 ++}
1.3138 ++
1.3139 ++
1.3140 ++
1.3141 ++/* Return the length of INSN. LENGTH is the initial length computed by
1.3142 ++ attributes in the machine-description file. */
1.3143 ++
1.3144 ++int
1.3145 ++avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
1.3146 ++ int length ATTRIBUTE_UNUSED)
1.3147 ++{
1.3148 ++ return length;
1.3149 ++}
1.3150 ++
1.3151 ++void
1.3152 ++avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
1.3153 ++ int iscond ATTRIBUTE_UNUSED,
1.3154 ++ rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
1.3155 ++{
1.3156 ++
1.3157 ++ unsigned long saved_reg_mask, saved_fp_reg_mask;
1.3158 ++ int insert_ret = TRUE;
1.3159 ++ int reglist8 = 0;
1.3160 ++ int stack_adjustment = get_frame_size ();
1.3161 ++ unsigned int func_type = avr32_current_func_type ();
1.3162 ++ FILE *f = asm_out_file;
1.3163 ++
1.3164 ++  /* Naked functions do not have an epilogue */
1.3165 ++ if (IS_NAKED (func_type))
1.3166 ++ return;
1.3167 ++
1.3168 ++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
1.3169 ++
1.3170 ++ saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
1.3171 ++
1.3172 ++ /* Reset frame pointer */
1.3173 ++ if (stack_adjustment > 0)
1.3174 ++ {
1.3175 ++ if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
1.3176 ++ {
1.3177 ++ fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
1.3178 ++ -stack_adjustment);
1.3179 ++ }
1.3180 ++ else
1.3181 ++ {
1.3182 ++ /* TODO! Is it safe to use r8 as scratch?? */
1.3183 ++ fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
1.3184 ++ -stack_adjustment);
1.3185 ++ fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
1.3186 ++ -stack_adjustment);
1.3187 ++ fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
1.3188 ++ }
1.3189 ++ }
1.3190 ++
1.3191 ++ if (saved_fp_reg_mask)
1.3192 ++ {
1.3193 ++ char reglist[64]; /* 64 bytes should be enough... */
1.3194 ++ avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
1.3195 ++ fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
1.3196 ++ if (saved_fp_reg_mask & ~0xff)
1.3197 ++ {
1.3198 ++ saved_fp_reg_mask &= ~0xff;
1.3199 ++ avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
1.3200 ++ fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
1.3201 ++ }
1.3202 ++ }
1.3203 ++
1.3204 ++ if (saved_reg_mask)
1.3205 ++ {
1.3206 ++ /* Must pop used registers */
1.3207 ++
1.3208 ++ /* Should we use POPM or LDM? */
1.3209 ++ int usePOPM = TRUE;
1.3210 ++ if (((saved_reg_mask & (1 << 0)) ||
1.3211 ++ (saved_reg_mask & (1 << 1)) ||
1.3212 ++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
1.3213 ++ {
1.3214 ++ /* One of R0-R3 should at least be popped */
1.3215 ++ if (((saved_reg_mask & (1 << 0)) &&
1.3216 ++ (saved_reg_mask & (1 << 1)) &&
1.3217 ++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
1.3218 ++ {
1.3219 ++ /* All should be popped */
1.3220 ++ reglist8 |= 0x01;
1.3221 ++ }
1.3222 ++ else
1.3223 ++ {
1.3224 ++ usePOPM = FALSE;
1.3225 ++ }
1.3226 ++ }
1.3227 ++
1.3228 ++ if (((saved_reg_mask & (1 << 4)) ||
1.3229 ++ (saved_reg_mask & (1 << 5)) ||
1.3230 ++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
1.3231 ++ {
1.3232 ++	  /* One of R4-R7 should at least be popped */
1.3233 ++ if (((saved_reg_mask & (1 << 4)) &&
1.3234 ++ (saved_reg_mask & (1 << 5)) &&
1.3235 ++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
1.3236 ++ {
1.3237 ++ if (usePOPM)
1.3238 ++ /* All should be popped */
1.3239 ++ reglist8 |= 0x02;
1.3240 ++ }
1.3241 ++ else
1.3242 ++ {
1.3243 ++ usePOPM = FALSE;
1.3244 ++ }
1.3245 ++ }
1.3246 ++
1.3247 ++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
1.3248 ++ {
1.3249 ++	  /* One of R8-R9 should at least be popped */
1.3250 ++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
1.3251 ++ {
1.3252 ++ if (usePOPM)
1.3253 ++		/* All should be popped */
1.3254 ++ reglist8 |= 0x04;
1.3255 ++ }
1.3256 ++ else
1.3257 ++ {
1.3258 ++ usePOPM = FALSE;
1.3259 ++ }
1.3260 ++ }
1.3261 ++
1.3262 ++ if (saved_reg_mask & (1 << 10))
1.3263 ++ reglist8 |= 0x08;
1.3264 ++
1.3265 ++ if (saved_reg_mask & (1 << 11))
1.3266 ++ reglist8 |= 0x10;
1.3267 ++
1.3268 ++ if (saved_reg_mask & (1 << 12))
1.3269 ++ reglist8 |= 0x20;
1.3270 ++
1.3271 ++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
1.3272 ++ /* Pop LR */
1.3273 ++ reglist8 |= 0x40;
1.3274 ++
1.3275 ++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
1.3276 ++ /* Pop LR into PC. */
1.3277 ++ reglist8 |= 0x80;
1.3278 ++
1.3279 ++ if (usePOPM)
1.3280 ++ {
1.3281 ++ char reglist[64]; /* 64 bytes should be enough... */
1.3282 ++ avr32_make_reglist8 (reglist8, (char *) reglist);
1.3283 ++
1.3284 ++ if (reglist8 & 0x80)
1.3285 ++ /* This instruction is also a return */
1.3286 ++ insert_ret = FALSE;
1.3287 ++
1.3288 ++ if (r12_imm && !insert_ret)
1.3289 ++ fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
1.3290 ++ else
1.3291 ++ fprintf (f, "\tpopm\t%s\n", reglist);
1.3292 ++
1.3293 ++ }
1.3294 ++ else
1.3295 ++ {
1.3296 ++ char reglist[64]; /* 64 bytes should be enough... */
1.3297 ++ avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
1.3298 ++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
1.3299 ++ /* This instruction is also a return */
1.3300 ++ insert_ret = FALSE;
1.3301 ++
1.3302 ++ if (r12_imm && !insert_ret)
1.3303 ++ fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
1.3304 ++ INTVAL (r12_imm));
1.3305 ++ else
1.3306 ++ fprintf (f, "\tldm\tsp++, %s\n", reglist);
1.3307 ++
1.3308 ++ }
1.3309 ++
1.3310 ++ }
1.3311 ++
1.3312 ++ /* Stack adjustment for exception handler. */
1.3313 ++ if (current_function_calls_eh_return)
1.3314 ++ fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
1.3315 ++
1.3316 ++
1.3317 ++ if (IS_INTERRUPT (func_type))
1.3318 ++ {
1.3319 ++ fprintf (f, "\trete\n");
1.3320 ++ }
1.3321 ++ else if (insert_ret)
1.3322 ++ {
1.3323 ++ if (r12_imm)
1.3324 ++ fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
1.3325 ++ else
1.3326 ++ fprintf (f, "\tretal\tr12\n");
1.3327 ++ }
1.3328 ++}
1.3329 ++
1.3330 ++/* Function for converting a fp-register mask to a
1.3331 ++ reglistCPD8 register list string. */
1.3332 ++void
1.3333 ++avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
1.3334 ++{
1.3335 ++ int i;
1.3336 ++
1.3337 ++ /* Make sure reglist_string is empty */
1.3338 ++ reglist_string[0] = '\0';
1.3339 ++
1.3340 ++ for (i = 0; i < NUM_FP_REGS; i += 2)
1.3341 ++ {
1.3342 ++ if (reglist_mask & (1 << i))
1.3343 ++ {
1.3344 ++ strlen (reglist_string) ?
1.3345 ++ sprintf (reglist_string, "%s, %s-%s", reglist_string,
1.3346 ++ reg_names[INTERNAL_FP_REGNUM (i)],
1.3347 ++ reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
1.3348 ++ sprintf (reglist_string, "%s-%s",
1.3349 ++ reg_names[INTERNAL_FP_REGNUM (i)],
1.3350 ++ reg_names[INTERNAL_FP_REGNUM (i + 1)]);
1.3351 ++ }
1.3352 ++ }
1.3353 ++}
1.3354 ++
1.3355 ++/* Function for converting a fp-register mask to a
1.3356 ++ reglistCP8 register list string. */
1.3357 ++void
1.3358 ++avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
1.3359 ++{
1.3360 ++ int i;
1.3361 ++
1.3362 ++ /* Make sure reglist_string is empty */
1.3363 ++ reglist_string[0] = '\0';
1.3364 ++
1.3365 ++ for (i = 0; i < NUM_FP_REGS; ++i)
1.3366 ++ {
1.3367 ++ if (reglist_mask & (1 << i))
1.3368 ++ {
1.3369 ++ strlen (reglist_string) ?
1.3370 ++ sprintf (reglist_string, "%s, %s", reglist_string,
1.3371 ++ reg_names[INTERNAL_FP_REGNUM (i)]) :
1.3372 ++ sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
1.3373 ++ }
1.3374 ++ }
1.3375 ++}
1.3376 ++
1.3377 ++void
1.3378 ++avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
1.3379 ++{
1.3380 ++ int i;
1.3381 ++
1.3382 ++ /* Make sure reglist16_string is empty */
1.3383 ++ reglist16_string[0] = '\0';
1.3384 ++
1.3385 ++ for (i = 0; i < 16; ++i)
1.3386 ++ {
1.3387 ++ if (reglist16_vect & (1 << i))
1.3388 ++ {
1.3389 ++ strlen (reglist16_string) ?
1.3390 ++ sprintf (reglist16_string, "%s, %s", reglist16_string,
1.3391 ++ reg_names[INTERNAL_REGNUM (i)]) :
1.3392 ++ sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
1.3393 ++ }
1.3394 ++ }
1.3395 ++}
1.3396 ++
1.3397 ++int
1.3398 ++avr32_convert_to_reglist16 (int reglist8_vect)
1.3399 ++{
1.3400 ++ int reglist16_vect = 0;
1.3401 ++ if (reglist8_vect & 0x1)
1.3402 ++ reglist16_vect |= 0xF;
1.3403 ++ if (reglist8_vect & 0x2)
1.3404 ++ reglist16_vect |= 0xF0;
1.3405 ++ if (reglist8_vect & 0x4)
1.3406 ++ reglist16_vect |= 0x300;
1.3407 ++ if (reglist8_vect & 0x8)
1.3408 ++ reglist16_vect |= 0x400;
1.3409 ++ if (reglist8_vect & 0x10)
1.3410 ++ reglist16_vect |= 0x800;
1.3411 ++ if (reglist8_vect & 0x20)
1.3412 ++ reglist16_vect |= 0x1000;
1.3413 ++ if (reglist8_vect & 0x40)
1.3414 ++ reglist16_vect |= 0x4000;
1.3415 ++ if (reglist8_vect & 0x80)
1.3416 ++ reglist16_vect |= 0x8000;
1.3417 ++
1.3418 ++ return reglist16_vect;
1.3419 ++}
1.3420 ++
1.3421 ++void
1.3422 ++avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
1.3423 ++{
1.3424 ++ /* Make sure reglist8_string is empty */
1.3425 ++ reglist8_string[0] = '\0';
1.3426 ++
1.3427 ++ if (reglist8_vect & 0x1)
1.3428 ++ sprintf (reglist8_string, "r0-r3");
1.3429 ++ if (reglist8_vect & 0x2)
1.3430 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
1.3431 ++ reglist8_string) :
1.3432 ++ sprintf (reglist8_string, "r4-r7");
1.3433 ++ if (reglist8_vect & 0x4)
1.3434 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
1.3435 ++ reglist8_string) :
1.3436 ++ sprintf (reglist8_string, "r8-r9");
1.3437 ++ if (reglist8_vect & 0x8)
1.3438 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
1.3439 ++ reglist8_string) :
1.3440 ++ sprintf (reglist8_string, "r10");
1.3441 ++ if (reglist8_vect & 0x10)
1.3442 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
1.3443 ++ reglist8_string) :
1.3444 ++ sprintf (reglist8_string, "r11");
1.3445 ++ if (reglist8_vect & 0x20)
1.3446 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
1.3447 ++ reglist8_string) :
1.3448 ++ sprintf (reglist8_string, "r12");
1.3449 ++ if (reglist8_vect & 0x40)
1.3450 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
1.3451 ++ reglist8_string) :
1.3452 ++ sprintf (reglist8_string, "lr");
1.3453 ++ if (reglist8_vect & 0x80)
1.3454 ++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
1.3455 ++ reglist8_string) :
1.3456 ++ sprintf (reglist8_string, "pc");
1.3457 ++}
1.3458 ++
1.3459 ++int
1.3460 ++avr32_eh_return_data_regno (int n)
1.3461 ++{
1.3462 ++ if (n >= 0 && n <= 3)
1.3463 ++ return 8 + n;
1.3464 ++ else
1.3465 ++ return INVALID_REGNUM;
1.3466 ++}
1.3467 ++
1.3468 ++/* Compute the distance from register FROM to register TO.
1.3469 ++ These can be the arg pointer, the frame pointer or
1.3470 ++ the stack pointer.
1.3471 ++ Typical stack layout looks like this:
1.3472 ++
1.3473 ++ old stack pointer -> | |
1.3474 ++ ----
1.3475 ++ | | \
1.3476 ++ | | saved arguments for
1.3477 ++ | | vararg functions
1.3478 ++ arg_pointer -> | | /
1.3479 ++ --
1.3480 ++ | | \
1.3481 ++ | | call saved
1.3482 ++ | | registers
1.3483 ++ | | /
1.3484 ++ frame ptr -> --
1.3485 ++ | | \
1.3486 ++ | | local
1.3487 ++ | | variables
1.3488 ++ stack ptr --> | | /
1.3489 ++ --
1.3490 ++ | | \
1.3491 ++ | | outgoing
1.3492 ++ | | arguments
1.3493 ++ | | /
1.3494 ++ --
1.3495 ++
1.3496 ++  For a given function some or all of these stack components
1.3497 ++ may not be needed, giving rise to the possibility of
1.3498 ++ eliminating some of the registers.
1.3499 ++
1.3500 ++ The values returned by this function must reflect the behaviour
1.3501 ++ of avr32_expand_prologue() and avr32_compute_save_reg_mask().
1.3502 ++
1.3503 ++ The sign of the number returned reflects the direction of stack
1.3504 ++ growth, so the values are positive for all eliminations except
1.3505 ++ from the soft frame pointer to the hard frame pointer. */
1.3506 ++
1.3507 ++
1.3508 ++int
1.3509 ++avr32_initial_elimination_offset (int from, int to)
1.3510 ++{
1.3511 ++ int i;
1.3512 ++ int call_saved_regs = 0;
1.3513 ++ unsigned long saved_reg_mask, saved_fp_reg_mask;
1.3514 ++ unsigned int local_vars = get_frame_size ();
1.3515 ++
1.3516 ++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
1.3517 ++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
1.3518 ++
1.3519 ++ for (i = 0; i < 16; ++i)
1.3520 ++ {
1.3521 ++ if (saved_reg_mask & (1 << i))
1.3522 ++ call_saved_regs += 4;
1.3523 ++ }
1.3524 ++
1.3525 ++ for (i = 0; i < NUM_FP_REGS; ++i)
1.3526 ++ {
1.3527 ++ if (saved_fp_reg_mask & (1 << i))
1.3528 ++ call_saved_regs += 4;
1.3529 ++ }
1.3530 ++
1.3531 ++ switch (from)
1.3532 ++ {
1.3533 ++ case ARG_POINTER_REGNUM:
1.3534 ++ switch (to)
1.3535 ++ {
1.3536 ++ case STACK_POINTER_REGNUM:
1.3537 ++ return call_saved_regs + local_vars;
1.3538 ++ case FRAME_POINTER_REGNUM:
1.3539 ++ return call_saved_regs;
1.3540 ++ default:
1.3541 ++ abort ();
1.3542 ++ }
1.3543 ++ case FRAME_POINTER_REGNUM:
1.3544 ++ switch (to)
1.3545 ++ {
1.3546 ++ case STACK_POINTER_REGNUM:
1.3547 ++ return local_vars;
1.3548 ++ default:
1.3549 ++ abort ();
1.3550 ++ }
1.3551 ++ default:
1.3552 ++ abort ();
1.3553 ++ }
1.3554 ++}
1.3555 ++
1.3556 ++
1.3557 ++/*
1.3558 ++ Returns a rtx used when passing the next argument to a function.
1.3559 ++  avr32_init_cumulative_args() and avr32_function_arg_advance() set which
1.3560 ++ register to use.
1.3561 ++*/
1.3562 ++rtx
1.3563 ++avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
1.3564 ++ tree type, int named)
1.3565 ++{
1.3566 ++ int index = -1;
1.3567 ++
1.3568 ++ HOST_WIDE_INT arg_size, arg_rsize;
1.3569 ++ if (type)
1.3570 ++ {
1.3571 ++ arg_size = int_size_in_bytes (type);
1.3572 ++ }
1.3573 ++ else
1.3574 ++ {
1.3575 ++ arg_size = GET_MODE_SIZE (mode);
1.3576 ++ }
1.3577 ++ arg_rsize = PUSH_ROUNDING (arg_size);
1.3578 ++
1.3579 ++ /*
1.3580 ++ The last time this macro is called, it is called with mode == VOIDmode,
1.3581 ++ and its result is passed to the call or call_value pattern as operands 2
1.3582 ++ and 3 respectively. */
1.3583 ++ if (mode == VOIDmode)
1.3584 ++ {
1.3585 ++ return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
1.3586 ++ }
1.3587 ++
1.3588 ++ if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
1.3589 ++ {
1.3590 ++ return NULL_RTX;
1.3591 ++ }
1.3592 ++
1.3593 ++ if (arg_rsize == 8)
1.3594 ++ {
1.3595 ++ /* use r11:r10 or r9:r8. */
1.3596 ++ if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
1.3597 ++ index = 1;
1.3598 ++ else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
1.3599 ++ index = 3;
1.3600 ++ else
1.3601 ++ index = -1;
1.3602 ++ }
1.3603 ++ else if (arg_rsize == 4)
1.3604 ++ { /* Use first available register */
1.3605 ++ index = 0;
1.3606 ++ while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
1.3607 ++ index++;
1.3608 ++ if (index > LAST_CUM_REG_INDEX)
1.3609 ++ index = -1;
1.3610 ++ }
1.3611 ++
1.3612 ++ SET_REG_INDEX (cum, index);
1.3613 ++
1.3614 ++ if (GET_REG_INDEX (cum) >= 0)
1.3615 ++ return gen_rtx_REG (mode,
1.3616 ++ avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
1.3617 ++
1.3618 ++ return NULL_RTX;
1.3619 ++}
1.3620 ++
1.3621 ++/*
1.3622 ++ Set the register used for passing the first argument to a function.
1.3623 ++*/
1.3624 ++void
1.3625 ++avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
1.3626 ++ tree fntype ATTRIBUTE_UNUSED,
1.3627 ++ rtx libname ATTRIBUTE_UNUSED,
1.3628 ++ tree fndecl ATTRIBUTE_UNUSED)
1.3629 ++ {
1.3630 ++ /* Set all registers as unused. */
1.3631 ++ SET_INDEXES_UNUSED (cum);
1.3632 ++
1.3633 ++ /* Reset uses_anonymous_args */
1.3634 ++ cum->uses_anonymous_args = 0;
1.3635 ++
1.3636 ++ /* Reset size of stack pushed arguments */
1.3637 ++ cum->stack_pushed_args_size = 0;
1.3638 ++ }
1.3639 ++
1.3640 ++/*
1.3641 ++ Set register used for passing the next argument to a function. Only the
1.3642 ++ Scratch Registers are used.
1.3643 ++
1.3644 ++ number name
1.3645 ++ 15 r15 PC
1.3646 ++ 14 r14 LR
1.3647 ++ 13 r13 _SP_________
1.3648 ++ FIRST_CUM_REG 12 r12 _||_
1.3649 ++ 10 r11 ||
1.3650 ++ 11 r10 _||_ Scratch Registers
1.3651 ++ 8 r9 ||
1.3652 ++ LAST_SCRATCH_REG 9 r8 _\/_________
1.3653 ++ 6 r7 /\
1.3654 ++ 7 r6 ||
1.3655 ++ 4 r5 ||
1.3656 ++ 5 r4 ||
1.3657 ++ 2 r3 ||
1.3658 ++ 3 r2 ||
1.3659 ++ 0 r1 ||
1.3660 ++ 1 r0 _||_________
1.3661 ++
1.3662 ++*/
1.3663 ++void
1.3664 ++avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
1.3665 ++ tree type, int named ATTRIBUTE_UNUSED)
1.3666 ++{
1.3667 ++ HOST_WIDE_INT arg_size, arg_rsize;
1.3668 ++
1.3669 ++ if (type)
1.3670 ++ {
1.3671 ++ arg_size = int_size_in_bytes (type);
1.3672 ++ }
1.3673 ++ else
1.3674 ++ {
1.3675 ++ arg_size = GET_MODE_SIZE (mode);
1.3676 ++ }
1.3677 ++ arg_rsize = PUSH_ROUNDING (arg_size);
1.3678 ++
1.3679 ++ /* If the argument has to be passed on the stack, no register is used. */
1.3680 ++ if ((*targetm.calls.must_pass_in_stack) (mode, type))
1.3681 ++ {
1.3682 ++ cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
1.3683 ++ return;
1.3684 ++ }
1.3685 ++
1.3686 ++ /* Mark the used registers as "used". */
1.3687 ++ if (GET_REG_INDEX (cum) >= 0)
1.3688 ++ {
1.3689 ++ SET_USED_INDEX (cum, GET_REG_INDEX (cum));
1.3690 ++ if (arg_rsize == 8)
1.3691 ++ {
1.3692 ++ SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
1.3693 ++ }
1.3694 ++ }
1.3695 ++ else
1.3696 ++ {
1.3697 ++ /* Had to use stack */
1.3698 ++ cum->stack_pushed_args_size += arg_rsize;
1.3699 ++ }
1.3700 ++}
1.3701 ++
1.3702 ++/*
1.3703 ++ Defines which direction to go to find the next register to use if the
1.3704 ++ argument is larger than one register or for arguments shorter than an
1.3705 ++ int which is not promoted, such as the last part of structures with
1.3706 ++ size not a multiple of 4. */
1.3707 ++enum direction
1.3708 ++avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
1.3709 ++ tree type)
1.3710 ++{
1.3711 ++ /* Pad upward for all aggregates except byte and halfword sized aggregates
1.3712 ++ which can be passed in registers. */
1.3713 ++ if (type
1.3714 ++ && AGGREGATE_TYPE_P (type)
1.3715 ++ && (int_size_in_bytes (type) != 1)
1.3716 ++ && !((int_size_in_bytes (type) == 2)
1.3717 ++ && TYPE_ALIGN_UNIT (type) >= 2)
1.3718 ++ && (int_size_in_bytes (type) & 0x3))
1.3719 ++ {
1.3720 ++ return upward;
1.3721 ++ }
1.3722 ++
1.3723 ++ return downward;
1.3724 ++}
1.3725 ++
1.3726 ++/*
1.3727 ++ Return a rtx used for the return value from a function call.
1.3728 ++*/
1.3729 ++rtx
1.3730 ++avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
1.3731 ++{
1.3732 ++ if (avr32_return_in_memory (type, func))
1.3733 ++ return NULL_RTX;
1.3734 ++
1.3735 ++ if (int_size_in_bytes (type) <= 4)
1.3736 ++ if (avr32_return_in_msb (type))
1.3737 ++ /* Aggregates of size less than a word which does align the data in the
1.3738 ++ MSB must use SImode for r12. */
1.3739 ++ return gen_rtx_REG (SImode, RET_REGISTER);
1.3740 ++ else
1.3741 ++ return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
1.3742 ++ else if (int_size_in_bytes (type) <= 8)
1.3743 ++ return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
1.3744 ++
1.3745 ++ return NULL_RTX;
1.3746 ++}
1.3747 ++
1.3748 ++/*
1.3749 ++ Return a rtx used for the return value from a library function call.
1.3750 ++*/
1.3751 ++rtx
1.3752 ++avr32_libcall_value (enum machine_mode mode)
1.3753 ++{
1.3754 ++
1.3755 ++ if (GET_MODE_SIZE (mode) <= 4)
1.3756 ++ return gen_rtx_REG (mode, RET_REGISTER);
1.3757 ++ else if (GET_MODE_SIZE (mode) <= 8)
1.3758 ++ return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
1.3759 ++ else
1.3760 ++ return NULL_RTX;
1.3761 ++}
1.3762 ++
1.3763 ++/* Return TRUE if X references a SYMBOL_REF. */
1.3764 ++int
1.3765 ++symbol_mentioned_p (rtx x)
1.3766 ++{
1.3767 ++ const char *fmt;
1.3768 ++ int i;
1.3769 ++
1.3770 ++ if (GET_CODE (x) == SYMBOL_REF)
1.3771 ++ return 1;
1.3772 ++
1.3773 ++ fmt = GET_RTX_FORMAT (GET_CODE (x));
1.3774 ++
1.3775 ++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1.3776 ++ {
1.3777 ++ if (fmt[i] == 'E')
1.3778 ++ {
1.3779 ++ int j;
1.3780 ++
1.3781 ++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1.3782 ++ if (symbol_mentioned_p (XVECEXP (x, i, j)))
1.3783 ++ return 1;
1.3784 ++ }
1.3785 ++ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
1.3786 ++ return 1;
1.3787 ++ }
1.3788 ++
1.3789 ++ return 0;
1.3790 ++}
1.3791 ++
1.3792 ++/* Return TRUE if X references a LABEL_REF. */
1.3793 ++int
1.3794 ++label_mentioned_p (rtx x)
1.3795 ++{
1.3796 ++ const char *fmt;
1.3797 ++ int i;
1.3798 ++
1.3799 ++ if (GET_CODE (x) == LABEL_REF)
1.3800 ++ return 1;
1.3801 ++
1.3802 ++ fmt = GET_RTX_FORMAT (GET_CODE (x));
1.3803 ++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1.3804 ++ {
1.3805 ++ if (fmt[i] == 'E')
1.3806 ++ {
1.3807 ++ int j;
1.3808 ++
1.3809 ++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1.3810 ++ if (label_mentioned_p (XVECEXP (x, i, j)))
1.3811 ++ return 1;
1.3812 ++ }
1.3813 ++ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
1.3814 ++ return 1;
1.3815 ++ }
1.3816 ++
1.3817 ++ return 0;
1.3818 ++}
1.3819 ++
1.3820 ++/* Return TRUE if X contains a MEM expression. */
1.3821 ++int
1.3822 ++mem_mentioned_p (rtx x)
1.3823 ++{
1.3824 ++ const char *fmt;
1.3825 ++ int i;
1.3826 ++
1.3827 ++ if (MEM_P (x))
1.3828 ++ return 1;
1.3829 ++
1.3830 ++ fmt = GET_RTX_FORMAT (GET_CODE (x));
1.3831 ++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1.3832 ++ {
1.3833 ++ if (fmt[i] == 'E')
1.3834 ++ {
1.3835 ++ int j;
1.3836 ++
1.3837 ++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1.3838 ++ if (mem_mentioned_p (XVECEXP (x, i, j)))
1.3839 ++ return 1;
1.3840 ++ }
1.3841 ++ else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
1.3842 ++ return 1;
1.3843 ++ }
1.3844 ++
1.3845 ++ return 0;
1.3846 ++}
1.3847 ++
1.3848 ++int
1.3849 ++avr32_legitimate_pic_operand_p (rtx x)
1.3850 ++{
1.3851 ++
1.3852 ++ /* We can't have const, this must be broken down to a symbol. */
1.3853 ++ if (GET_CODE (x) == CONST)
1.3854 ++ return FALSE;
1.3855 ++
1.3856 ++ /* Can't access symbols or labels via the constant pool either */
1.3857 ++ if ((GET_CODE (x) == SYMBOL_REF
1.3858 ++ && CONSTANT_POOL_ADDRESS_P (x)
1.3859 ++ && (symbol_mentioned_p (get_pool_constant (x))
1.3860 ++ || label_mentioned_p (get_pool_constant (x)))))
1.3861 ++ return FALSE;
1.3862 ++
1.3863 ++ return TRUE;
1.3864 ++}
1.3865 ++
1.3866 ++
1.3867 ++rtx
1.3868 ++legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
1.3869 ++ rtx reg)
1.3870 ++{
1.3871 ++
1.3872 ++ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
1.3873 ++ {
1.3874 ++ int subregs = 0;
1.3875 ++
1.3876 ++ if (reg == 0)
1.3877 ++ {
1.3878 ++ if (no_new_pseudos)
1.3879 ++ abort ();
1.3880 ++ else
1.3881 ++ reg = gen_reg_rtx (Pmode);
1.3882 ++
1.3883 ++ subregs = 1;
1.3884 ++ }
1.3885 ++
1.3886 ++ emit_move_insn (reg, orig);
1.3887 ++
1.3888 ++ /* Only set current function as using pic offset table if flag_pic is
1.3889 ++ set. This is because this function is also used if
1.3890 ++ TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
1.3891 ++ if (flag_pic)
1.3892 ++ current_function_uses_pic_offset_table = 1;
1.3893 ++
1.3894 ++ /* Put a REG_EQUAL note on this insn, so that it can be optimized by
1.3895 ++ loop. */
1.3896 ++ return reg;
1.3897 ++ }
1.3898 ++ else if (GET_CODE (orig) == CONST)
1.3899 ++ {
1.3900 ++ rtx base, offset;
1.3901 ++
1.3902 ++ if (flag_pic
1.3903 ++ && GET_CODE (XEXP (orig, 0)) == PLUS
1.3904 ++ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
1.3905 ++ return orig;
1.3906 ++
1.3907 ++ if (reg == 0)
1.3908 ++ {
1.3909 ++ if (no_new_pseudos)
1.3910 ++ abort ();
1.3911 ++ else
1.3912 ++ reg = gen_reg_rtx (Pmode);
1.3913 ++ }
1.3914 ++
1.3915 ++ if (GET_CODE (XEXP (orig, 0)) == PLUS)
1.3916 ++ {
1.3917 ++ base =
1.3918 ++ legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
1.3919 ++ offset =
1.3920 ++ legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
1.3921 ++ base == reg ? 0 : reg);
1.3922 ++ }
1.3923 ++ else
1.3924 ++ abort ();
1.3925 ++
1.3926 ++ if (GET_CODE (offset) == CONST_INT)
1.3927 ++ {
1.3928 ++ /* The base register doesn't really matter, we only want to test
1.3929 ++ the index for the appropriate mode. */
1.3930 ++ if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
1.3931 ++ {
1.3932 ++ if (!no_new_pseudos)
1.3933 ++ offset = force_reg (Pmode, offset);
1.3934 ++ else
1.3935 ++ abort ();
1.3936 ++ }
1.3937 ++
1.3938 ++ if (GET_CODE (offset) == CONST_INT)
1.3939 ++ return plus_constant (base, INTVAL (offset));
1.3940 ++ }
1.3941 ++
1.3942 ++ return gen_rtx_PLUS (Pmode, base, offset);
1.3943 ++ }
1.3944 ++
1.3945 ++ return orig;
1.3946 ++}
1.3947 ++
1.3948 ++/* Generate code to load the PIC register. */
1.3949 ++void
1.3950 ++avr32_load_pic_register (void)
1.3951 ++{
1.3952 ++ rtx l1, pic_tmp;
1.3953 ++ rtx global_offset_table;
1.3954 ++
1.3955 ++ if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
1.3956 ++ return;
1.3957 ++
1.3958 ++ if (!flag_pic)
1.3959 ++ abort ();
1.3960 ++
1.3961 ++ l1 = gen_label_rtx ();
1.3962 ++
1.3963 ++ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
1.3964 ++ pic_tmp =
1.3965 ++ gen_rtx_CONST (Pmode,
1.3966 ++ gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
1.3967 ++ global_offset_table));
1.3968 ++ emit_insn (gen_pic_load_addr
1.3969 ++ (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
1.3970 ++ emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
1.3971 ++
1.3972 ++ /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
1.3973 ++ can cause life info to screw up. */
1.3974 ++ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
1.3975 ++}
1.3976 ++
1.3977 ++
1.3978 ++
1.3979 ++/* This hook should return true if values of type type are returned at the most
1.3980 ++ significant end of a register (in other words, if they are padded at the
1.3981 ++ least significant end). You can assume that type is returned in a register;
1.3982 ++ the caller is required to check this. Note that the register provided by
1.3983 ++ FUNCTION_VALUE must be able to hold the complete return value. For example,
1.3984 ++ if a 1-, 2- or 3-byte structure is returned at the most significant end of a
1.3985 ++ 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
1.3986 ++bool
1.3987 ++avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
1.3988 ++{
1.3989 ++ /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
1.3990 ++ ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
1.3991 ++ false; else return true; */
1.3992 ++
1.3993 ++ return false;
1.3994 ++}
1.3995 ++
1.3996 ++
1.3997 ++/*
1.3998 ++ Returns one if a certain function value is going to be returned in memory
1.3999 ++ and zero if it is going to be returned in a register.
1.4000 ++
1.4001 ++ BLKmode and all other modes that is larger than 64 bits are returned in
1.4002 ++ memory.
1.4003 ++*/
1.4004 ++bool
1.4005 ++avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
1.4006 ++{
1.4007 ++ if (TYPE_MODE (type) == VOIDmode)
1.4008 ++ return false;
1.4009 ++
1.4010 ++ if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
1.4011 ++ || int_size_in_bytes (type) == -1)
1.4012 ++ {
1.4013 ++ return true;
1.4014 ++ }
1.4015 ++
1.4016 ++ /* If we have an aggregate then use the same mechanism as when checking if
1.4017 ++ it should be passed on the stack. */
1.4018 ++ if (type
1.4019 ++ && AGGREGATE_TYPE_P (type)
1.4020 ++ && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
1.4021 ++ return true;
1.4022 ++
1.4023 ++ return false;
1.4024 ++}
1.4025 ++
1.4026 ++
1.4027 ++/* Output the constant part of the trampoline.
1.4028 ++ lddpc r0, pc[0x8:e] ; load static chain register
1.4029 ++ lddpc pc, pc[0x8:e] ; jump to subroutine
1.4030 ++ .long 0 ; Address to static chain,
1.4031 ++ ; filled in by avr32_initialize_trampoline()
1.4032 ++ .long 0 ; Address to subroutine,
1.4033 ++ ; filled in by avr32_initialize_trampoline()
1.4034 ++*/
1.4035 ++void
1.4036 ++avr32_trampoline_template (FILE * file)
1.4037 ++{
1.4038 ++ fprintf (file, "\tlddpc r0, pc[8]\n");
1.4039 ++ fprintf (file, "\tlddpc pc, pc[8]\n");
1.4040 ++ /* make room for the address of the static chain. */
1.4041 ++ fprintf (file, "\t.long\t0\n");
1.4042 ++ /* make room for the address to the subroutine. */
1.4043 ++ fprintf (file, "\t.long\t0\n");
1.4044 ++}
1.4045 ++
1.4046 ++
1.4047 ++/*
1.4048 ++ Initialize the variable parts of a trampoline.
1.4049 ++*/
1.4050 ++void
1.4051 ++avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
1.4052 ++{
1.4053 ++ /* Store the address to the static chain. */
1.4054 ++ emit_move_insn (gen_rtx_MEM
1.4055 ++ (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
1.4056 ++ static_chain);
1.4057 ++
1.4058 ++ /* Store the address to the function. */
1.4059 ++ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
1.4060 ++ fnaddr);
1.4061 ++
1.4062 ++ emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
1.4063 ++ gen_rtx_CONST_INT (SImode,
1.4064 ++ AVR32_CACHE_INVALIDATE_ICACHE)));
1.4065 ++}
1.4066 ++
1.4067 ++/* Return nonzero if X is valid as an addressing register. */
1.4068 ++int
1.4069 ++avr32_address_register_rtx_p (rtx x, int strict_p)
1.4070 ++{
1.4071 ++ int regno;
1.4072 ++
1.4073 ++ if (!register_operand(x, GET_MODE(x)))
1.4074 ++ return 0;
1.4075 ++
1.4076 ++ /* If strict we require the register to be a hard register. */
1.4077 ++ if (strict_p
1.4078 ++ && !REG_P(x))
1.4079 ++ return 0;
1.4080 ++
1.4081 ++ regno = REGNO (x);
1.4082 ++
1.4083 ++ if (strict_p)
1.4084 ++ return REGNO_OK_FOR_BASE_P (regno);
1.4085 ++
1.4086 ++ return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
1.4087 ++}
1.4088 ++
1.4089 ++/* Return nonzero if INDEX is valid for an address index operand. */
1.4090 ++int
1.4091 ++avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
1.4092 ++{
1.4093 ++ enum rtx_code code = GET_CODE (index);
1.4094 ++
1.4095 ++ if (GET_MODE_SIZE (mode) > 8)
1.4096 ++ return 0;
1.4097 ++
1.4098 ++ /* Standard coprocessor addressing modes. */
1.4099 ++ if (code == CONST_INT)
1.4100 ++ {
1.4101 ++ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
1.4102 ++ /* Coprocessor mem insns have a smaller reach than ordinary mem insns */
1.4103 ++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
1.4104 ++ else
1.4105 ++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
1.4106 ++ }
1.4107 ++
1.4108 ++ if (avr32_address_register_rtx_p (index, strict_p))
1.4109 ++ return 1;
1.4110 ++
1.4111 ++ if (code == MULT)
1.4112 ++ {
1.4113 ++ rtx xiop0 = XEXP (index, 0);
1.4114 ++ rtx xiop1 = XEXP (index, 1);
1.4115 ++ return ((avr32_address_register_rtx_p (xiop0, strict_p)
1.4116 ++ && power_of_two_operand (xiop1, SImode)
1.4117 ++ && (INTVAL (xiop1) <= 8))
1.4118 ++ || (avr32_address_register_rtx_p (xiop1, strict_p)
1.4119 ++ && power_of_two_operand (xiop0, SImode)
1.4120 ++ && (INTVAL (xiop0) <= 8)));
1.4121 ++ }
1.4122 ++ else if (code == ASHIFT)
1.4123 ++ {
1.4124 ++ rtx op = XEXP (index, 1);
1.4125 ++
1.4126 ++ return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
1.4127 ++ && GET_CODE (op) == CONST_INT
1.4128 ++ && INTVAL (op) > 0 && INTVAL (op) <= 3);
1.4129 ++ }
1.4130 ++
1.4131 ++ return 0;
1.4132 ++}
1.4133 ++
1.4134 ++/*
1.4135 ++ Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
1.4136 ++ the RTX x is a legitimate memory address.
1.4137 ++
1.4138 ++ Returns NO_REGS if the address is not legitimate, GENERAL_REGS or ALL_REGS
1.4139 ++ if it is.
1.4140 ++*/
1.4141 ++
1.4142 ++/* Forward declaration*/
1.4143 ++int is_minipool_label (rtx label);
1.4144 ++
1.4145 ++int
1.4146 ++avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
1.4147 ++{
1.4148 ++
1.4149 ++ switch (GET_CODE (x))
1.4150 ++ {
1.4151 ++ case REG:
1.4152 ++ return avr32_address_register_rtx_p (x, strict);
1.4153 ++ case CONST_INT:
1.4154 ++ return ((mode==SImode)
1.4155 ++ && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17"));
1.4156 ++ case CONST:
1.4157 ++ {
1.4158 ++ rtx label = avr32_find_symbol (x);
1.4159 ++ if (label
1.4160 ++ &&
1.4161 ++ ((CONSTANT_POOL_ADDRESS_P (label)
1.4162 ++ && !(flag_pic
1.4163 ++ && (symbol_mentioned_p (get_pool_constant (label))
1.4164 ++ || label_mentioned_p (get_pool_constant (label)))))
1.4165 ++ /* TODO! Can this ever happen??? */
1.4166 ++ || ((GET_CODE (label) == LABEL_REF)
1.4167 ++ && GET_CODE (XEXP (label, 0)) == CODE_LABEL
1.4168 ++ && is_minipool_label (XEXP (label, 0)))
1.4169 ++ /*|| ((GET_CODE (label) == SYMBOL_REF)
1.4170 ++ && mode == SImode
1.4171 ++ && SYMBOL_REF_RMW_ADDR(label))*/))
1.4172 ++ {
1.4173 ++ return TRUE;
1.4174 ++ }
1.4175 ++ }
1.4176 ++ break;
1.4177 ++ case LABEL_REF:
1.4178 ++ if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
1.4179 ++ && is_minipool_label (XEXP (x, 0)))
1.4180 ++ {
1.4181 ++ return TRUE;
1.4182 ++ }
1.4183 ++ break;
1.4184 ++ case SYMBOL_REF:
1.4185 ++ {
1.4186 ++ if (CONSTANT_POOL_ADDRESS_P (x)
1.4187 ++ && !(flag_pic
1.4188 ++ && (symbol_mentioned_p (get_pool_constant (x))
1.4189 ++ || label_mentioned_p (get_pool_constant (x)))))
1.4190 ++ return TRUE;
1.4191 ++ else if (SYMBOL_REF_RCALL_FUNCTION_P (x)
1.4192 ++ || (mode == SImode
1.4193 ++ && SYMBOL_REF_RMW_ADDR (x)))
1.4194 ++ return TRUE;
1.4195 ++ break;
1.4196 ++ }
1.4197 ++ case PRE_DEC: /* (pre_dec (...)) */
1.4198 ++ case POST_INC: /* (post_inc (...)) */
1.4199 ++ return avr32_address_register_rtx_p (XEXP (x, 0), strict);
1.4200 ++ case PLUS: /* (plus (...) (...)) */
1.4201 ++ {
1.4202 ++ rtx xop0 = XEXP (x, 0);
1.4203 ++ rtx xop1 = XEXP (x, 1);
1.4204 ++
1.4205 ++ return ((avr32_address_register_rtx_p (xop0, strict)
1.4206 ++ && avr32_legitimate_index_p (mode, xop1, strict))
1.4207 ++ || (avr32_address_register_rtx_p (xop1, strict)
1.4208 ++ && avr32_legitimate_index_p (mode, xop0, strict)));
1.4209 ++ }
1.4210 ++ default:
1.4211 ++ break;
1.4212 ++ }
1.4213 ++
1.4214 ++ return FALSE;
1.4215 ++}
1.4216 ++
1.4217 ++
1.4218 ++int
1.4219 ++avr32_const_ok_for_move (HOST_WIDE_INT c)
1.4220 ++{
1.4221 ++ if ( TARGET_V2_INSNS )
1.4222 ++ return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
1.4223 ++ /* movh instruction */
1.4224 ++ || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
1.4225 ++ else
1.4226 ++ return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
1.4227 ++}
1.4228 ++
1.4229 ++int
1.4230 ++avr32_const_double_immediate (rtx value)
1.4231 ++{
1.4232 ++ HOST_WIDE_INT hi, lo;
1.4233 ++
1.4234 ++ if (GET_CODE (value) != CONST_DOUBLE)
1.4235 ++ return FALSE;
1.4236 ++
1.4237 ++ if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
1.4238 ++ {
1.4239 ++ HOST_WIDE_INT target_float[2];
1.4240 ++ hi = lo = 0;
1.4241 ++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
1.4242 ++ GET_MODE (value));
1.4243 ++ lo = target_float[0];
1.4244 ++ hi = target_float[1];
1.4245 ++ }
1.4246 ++ else
1.4247 ++ {
1.4248 ++ hi = CONST_DOUBLE_HIGH (value);
1.4249 ++ lo = CONST_DOUBLE_LOW (value);
1.4250 ++ }
1.4251 ++
1.4252 ++ if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
1.4253 ++ && (GET_MODE (value) == SFmode
1.4254 ++ || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
1.4255 ++ {
1.4256 ++ return TRUE;
1.4257 ++ }
1.4258 ++
1.4259 ++ return FALSE;
1.4260 ++}
1.4261 ++
1.4262 ++
1.4263 ++int
1.4264 ++avr32_legitimate_constant_p (rtx x)
1.4265 ++{
1.4266 ++ switch (GET_CODE (x))
1.4267 ++ {
1.4268 ++ case CONST_INT:
1.4269 ++ /* Check if we should put large immediate into constant pool
1.4270 ++ or load them directly with mov/orh.*/
1.4271 ++ if (!avr32_imm_in_const_pool)
1.4272 ++ return 1;
1.4273 ++
1.4274 ++ return avr32_const_ok_for_move (INTVAL (x));
1.4275 ++ case CONST_DOUBLE:
1.4276 ++ /* Check if we should put large immediate into constant pool
1.4277 ++ or load them directly with mov/orh.*/
1.4278 ++ if (!avr32_imm_in_const_pool)
1.4279 ++ return 1;
1.4280 ++
1.4281 ++ if (GET_MODE (x) == SFmode
1.4282 ++ || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
1.4283 ++ return avr32_const_double_immediate (x);
1.4284 ++ else
1.4285 ++ return 0;
1.4286 ++ case LABEL_REF:
1.4287 ++ case SYMBOL_REF:
1.4288 ++ return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS);
1.4289 ++ case CONST:
1.4290 ++ case HIGH:
1.4291 ++ case CONST_VECTOR:
1.4292 ++ return 0;
1.4293 ++ default:
1.4294 ++ printf ("%s():\n", __FUNCTION__);
1.4295 ++ debug_rtx (x);
1.4296 ++ return 1;
1.4297 ++ }
1.4298 ++}
1.4299 ++
1.4300 ++
1.4301 ++/* Strip any special encoding from labels */
1.4302 ++const char *
1.4303 ++avr32_strip_name_encoding (const char *name)
1.4304 ++{
1.4305 ++ const char *stripped = name;
1.4306 ++
1.4307 ++ while (1)
1.4308 ++ {
1.4309 ++ switch (stripped[0])
1.4310 ++ {
1.4311 ++ case '#':
1.4312 ++ stripped = strchr (name + 1, '#') + 1;
1.4313 ++ break;
1.4314 ++ case '*':
1.4315 ++ stripped = &stripped[1];
1.4316 ++ break;
1.4317 ++ default:
1.4318 ++ return stripped;
1.4319 ++ }
1.4320 ++ }
1.4321 ++}
1.4322 ++
1.4323 ++
1.4324 ++
1.4325 ++/* Do anything needed before RTL is emitted for each function. */
1.4326 ++static struct machine_function *
1.4327 ++avr32_init_machine_status (void)
1.4328 ++{
1.4329 ++ struct machine_function *machine;
1.4330 ++ machine =
1.4331 ++ (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
1.4332 ++
1.4333 ++#if AVR32_FT_UNKNOWN != 0
1.4334 ++ machine->func_type = AVR32_FT_UNKNOWN;
1.4335 ++#endif
1.4336 ++
1.4337 ++ machine->minipool_label_head = 0;
1.4338 ++ machine->minipool_label_tail = 0;
1.4339 ++ machine->ifcvt_after_reload = 0;
1.4340 ++ return machine;
1.4341 ++}
1.4342 ++
1.4343 ++void
1.4344 ++avr32_init_expanders (void)
1.4345 ++{
1.4346 ++ /* Arrange to initialize and mark the machine per-function status. */
1.4347 ++ init_machine_status = avr32_init_machine_status;
1.4348 ++}
1.4349 ++
1.4350 ++
1.4351 ++/* Return an RTX indicating where the return address to the
1.4352 ++ calling function can be found. */
1.4353 ++
1.4354 ++rtx
1.4355 ++avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
1.4356 ++{
1.4357 ++ if (count != 0)
1.4358 ++ return NULL_RTX;
1.4359 ++
1.4360 ++ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
1.4361 ++}
1.4362 ++
1.4363 ++
1.4364 ++void
1.4365 ++avr32_encode_section_info (tree decl, rtx rtl, int first)
1.4366 ++{
1.4367 ++ default_encode_section_info(decl, rtl, first);
1.4368 ++
1.4369 ++ if ( TREE_CODE (decl) == VAR_DECL
1.4370 ++ && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
1.4371 ++ && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl))
1.4372 ++ || TARGET_RMW_ADDRESSABLE_DATA) ){
1.4373 ++ if ( !TARGET_RMW || flag_pic )
1.4374 ++ return;
1.4375 ++ SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
1.4376 ++ }
1.4377 ++}
1.4378 ++
1.4379 ++void
1.4380 ++avr32_asm_output_label (FILE * stream, const char *name)
1.4381 ++{
1.4382 ++ name = avr32_strip_name_encoding (name);
1.4383 ++
1.4384 ++ /* Print the label. */
1.4385 ++ assemble_name (stream, name);
1.4386 ++ fprintf (stream, ":\n");
1.4387 ++}
1.4388 ++
1.4389 ++
1.4390 ++
1.4391 ++void
1.4392 ++avr32_asm_weaken_label (FILE * stream, const char *name)
1.4393 ++{
1.4394 ++ fprintf (stream, "\t.weak ");
1.4395 ++ assemble_name (stream, name);
1.4396 ++ fprintf (stream, "\n");
1.4397 ++}
1.4398 ++
1.4399 ++/*
1.4400 ++ Checks if a labelref is equal to a reserved word in the assembler. If it is,
1.4401 ++ insert a '_' before the label name.
1.4402 ++*/
1.4403 ++void
1.4404 ++avr32_asm_output_labelref (FILE * stream, const char *name)
1.4405 ++{
1.4406 ++ int verbatim = FALSE;
1.4407 ++ const char *stripped = name;
1.4408 ++ int strip_finished = FALSE;
1.4409 ++
1.4410 ++ while (!strip_finished)
1.4411 ++ {
1.4412 ++ switch (stripped[0])
1.4413 ++ {
1.4414 ++ case '#':
1.4415 ++ stripped = strchr (name + 1, '#') + 1;
1.4416 ++ break;
1.4417 ++ case '*':
1.4418 ++ stripped = &stripped[1];
1.4419 ++ verbatim = TRUE;
1.4420 ++ break;
1.4421 ++ default:
1.4422 ++ strip_finished = TRUE;
1.4423 ++ break;
1.4424 ++ }
1.4425 ++ }
1.4426 ++
1.4427 ++ if (verbatim)
1.4428 ++ fputs (stripped, stream);
1.4429 ++ else
1.4430 ++ asm_fprintf (stream, "%U%s", stripped);
1.4431 ++}
1.4432 ++
1.4433 ++
1.4434 ++
1.4435 ++/*
1.4436 ++ Check if the comparison in compare_exp is redundant
1.4437 ++ for the condition given in next_cond given that the
1.4438 ++ needed flags are already set by an earlier instruction.
1.4439 ++ Uses cc_prev_status to check this.
1.4440 ++
1.4441 ++ Returns NULL_RTX if the compare is not redundant
1.4442 ++ or the new condition to use in the conditional
1.4443 ++ instruction if the compare is redundant.
1.4444 ++*/
1.4445 ++static rtx
1.4446 ++is_compare_redundant (rtx compare_exp, rtx next_cond)
1.4447 ++{
1.4448 ++ int z_flag_valid = FALSE;
1.4449 ++ int n_flag_valid = FALSE;
1.4450 ++ rtx new_cond;
1.4451 ++
1.4452 ++ if (GET_CODE (compare_exp) != COMPARE
1.4453 ++ && GET_CODE (compare_exp) != AND)
1.4454 ++ return NULL_RTX;
1.4455 ++
1.4456 ++
1.4457 ++ if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
1.4458 ++ {
1.4459 ++ /* cc0 already contains the correct comparison -> delete cmp insn */
1.4460 ++ return next_cond;
1.4461 ++ }
1.4462 ++
1.4463 ++ if (GET_MODE (compare_exp) != SImode)
1.4464 ++ return NULL_RTX;
1.4465 ++
1.4466 ++ switch (cc_prev_status.mdep.flags)
1.4467 ++ {
1.4468 ++ case CC_SET_VNCZ:
1.4469 ++ case CC_SET_NCZ:
1.4470 ++ n_flag_valid = TRUE;
1.4471 ++ case CC_SET_CZ:
1.4472 ++ case CC_SET_Z:
1.4473 ++ z_flag_valid = TRUE;
1.4474 ++ }
1.4475 ++
1.4476 ++ if (cc_prev_status.mdep.value
1.4477 ++ && GET_CODE (compare_exp) == COMPARE
1.4478 ++ && REG_P (XEXP (compare_exp, 0))
1.4479 ++ && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
1.4480 ++ && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
1.4481 ++ && next_cond != NULL_RTX)
1.4482 ++ {
1.4483 ++ if (INTVAL (XEXP (compare_exp, 1)) == 0
1.4484 ++ && z_flag_valid
1.4485 ++ && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
1.4486 ++ /* We can skip the comparison since the Z flag already reflects ops[0] */
1.4487 ++ return next_cond;
1.4488 ++ else if (n_flag_valid
1.4489 ++ && ((INTVAL (XEXP (compare_exp, 1)) == 0
1.4490 ++ && (GET_CODE (next_cond) == GE
1.4491 ++ || GET_CODE (next_cond) == LT))
1.4492 ++ || (INTVAL (XEXP (compare_exp, 1)) == -1
1.4493 ++ && (GET_CODE (next_cond) == GT
1.4494 ++ || GET_CODE (next_cond) == LE))))
1.4495 ++ {
1.4496 ++ /* We can skip the comparison since the N flag already reflects ops[0],
1.4497 ++ which means that we can use the mi/pl conditions to check if
1.4498 ++ ops[0] is GE or LT 0. */
1.4499 ++ if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
1.4500 ++ new_cond =
1.4501 ++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
1.4502 ++ UNSPEC_COND_PL);
1.4503 ++ else
1.4504 ++ new_cond =
1.4505 ++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
1.4506 ++ UNSPEC_COND_MI);
1.4507 ++ return new_cond;
1.4508 ++ }
1.4509 ++ }
1.4510 ++ return NULL_RTX;
1.4511 ++}
1.4512 ++
1.4513 ++/* Updates cc_status. */
1.4514 ++void
1.4515 ++avr32_notice_update_cc (rtx exp, rtx insn)
1.4516 ++{
1.4517 ++ enum attr_cc attr_cc = get_attr_cc (insn);
1.4518 ++
1.4519 ++ if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
1.4520 ++ if (TARGET_V2_INSNS)
1.4521 ++ attr_cc = CC_NONE;
1.4522 ++ else
1.4523 ++ attr_cc = CC_SET_Z;
1.4524 ++
1.4525 ++ switch (attr_cc)
1.4526 ++ {
1.4527 ++ case CC_CALL_SET:
1.4528 ++ CC_STATUS_INIT;
1.4529 ++ FPCC_STATUS_INIT;
1.4530 ++ /* Check if the function call returns a value in r12 */
1.4531 ++ if (REG_P (recog_data.operand[0])
1.4532 ++ && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
1.4533 ++ {
1.4534 ++ cc_status.flags = 0;
1.4535 ++ cc_status.mdep.value =
1.4536 ++ gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
1.4537 ++ cc_status.mdep.flags = CC_SET_VNCZ;
1.4538 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4539 ++
1.4540 ++ }
1.4541 ++ break;
1.4542 ++ case CC_COMPARE:
1.4543 ++ {
1.4544 ++ /* Check that compare will not be optimized away if so nothing should
1.4545 ++ be done */
1.4546 ++ rtx compare_exp = SET_SRC (exp);
1.4547 ++ /* Check if we have a tst expression. If so convert it to a
1.4548 ++ compare with 0. */
1.4549 ++ if ( REG_P (SET_SRC (exp)) )
1.4550 ++ compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
1.4551 ++ SET_SRC (exp),
1.4552 ++ const0_rtx);
1.4553 ++
1.4554 ++ if (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) ==
1.4555 ++ NULL_RTX)
1.4556 ++ {
1.4557 ++
1.4558 ++ /* Reset the nonstandard flag */
1.4559 ++ CC_STATUS_INIT;
1.4560 ++ cc_status.flags = 0;
1.4561 ++ cc_status.mdep.value = compare_exp;
1.4562 ++ cc_status.mdep.flags = CC_SET_VNCZ;
1.4563 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4564 ++ }
1.4565 ++ }
1.4566 ++ break;
1.4567 ++ case CC_CMP_COND_INSN:
1.4568 ++ {
1.4569 ++ /* Conditional insn that emit the compare itself. */
1.4570 ++ rtx cmp;
1.4571 ++ rtx cmp_op0, cmp_op1;
1.4572 ++ rtx cond;
1.4573 ++ rtx dest;
1.4574 ++ rtx next_insn = next_nonnote_insn (insn);
1.4575 ++
1.4576 ++ if ( GET_CODE (exp) == COND_EXEC )
1.4577 ++ {
1.4578 ++ cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
1.4579 ++ cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
1.4580 ++ cond = COND_EXEC_TEST (exp);
1.4581 ++ dest = SET_DEST (COND_EXEC_CODE (exp));
1.4582 ++ }
1.4583 ++ else
1.4584 ++ {
1.4585 ++ /* If then else conditional. compare operands are in operands
1.4586 ++ 4 and 5. */
1.4587 ++ cmp_op0 = recog_data.operand[4];
1.4588 ++ cmp_op1 = recog_data.operand[5];
1.4589 ++ cond = recog_data.operand[1];
1.4590 ++ dest = SET_DEST (exp);
1.4591 ++ }
1.4592 ++
1.4593 ++ if ( GET_CODE (cmp_op0) == AND )
1.4594 ++ cmp = cmp_op0;
1.4595 ++ else
1.4596 ++ cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
1.4597 ++ cmp_op0,
1.4598 ++ cmp_op1);
1.4599 ++
1.4600 ++ /* Check if the conditional insns updates a register present
1.4601 ++ in the comparison, if so then we must reset the cc_status. */
1.4602 ++ if (REG_P (dest)
1.4603 ++ && (reg_mentioned_p (dest, cmp_op0)
1.4604 ++ || reg_mentioned_p (dest, cmp_op1))
1.4605 ++ && GET_CODE (exp) != COND_EXEC )
1.4606 ++ {
1.4607 ++ CC_STATUS_INIT;
1.4608 ++ }
1.4609 ++ else if (is_compare_redundant (cmp, cond) == NULL_RTX)
1.4610 ++ {
1.4611 ++ /* Reset the nonstandard flag */
1.4612 ++ CC_STATUS_INIT;
1.4613 ++ if ( GET_CODE (cmp_op0) == AND )
1.4614 ++ {
1.4615 ++ cc_status.flags = CC_INVERTED;
1.4616 ++ cc_status.mdep.flags = CC_SET_Z;
1.4617 ++ }
1.4618 ++ else
1.4619 ++ {
1.4620 ++ cc_status.flags = 0;
1.4621 ++ cc_status.mdep.flags = CC_SET_VNCZ;
1.4622 ++ }
1.4623 ++ cc_status.mdep.value = cmp;
1.4624 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4625 ++ }
1.4626 ++
1.4627 ++
1.4628 ++ /* Check if we have a COND_EXEC insn which updates one
1.4629 ++ of the registers in the compare status. */
1.4630 ++ if (REG_P (dest)
1.4631 ++ && (reg_mentioned_p (dest, cmp_op0)
1.4632 ++ || reg_mentioned_p (dest, cmp_op1))
1.4633 ++ && GET_CODE (exp) == COND_EXEC )
1.4634 ++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
1.4635 ++
1.4636 ++ if ( cc_status.mdep.cond_exec_cmp_clobbered
1.4637 ++ && GET_CODE (exp) == COND_EXEC
1.4638 ++ && next_insn != NULL
1.4639 ++ && INSN_P (next_insn)
1.4640 ++ && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
1.4641 ++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
1.4642 ++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
1.4643 ++ && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
1.4644 ++ || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
1.4645 ++ {
1.4646 ++ /* We have a sequence of conditional insns where the compare status has been clobbered
1.4647 ++ since the compare no longer reflects the content of the values to compare. */
1.4648 ++ CC_STATUS_INIT;
1.4649 ++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
1.4650 ++ }
1.4651 ++
1.4652 ++ }
1.4653 ++ break;
1.4654 ++ case CC_FPCOMPARE:
1.4655 ++ /* Check that floating-point compare will not be optimized away if so
1.4656 ++ nothing should be done */
1.4657 ++ if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp)))
1.4658 ++ {
1.4659 ++ /* cc0 already contains the correct comparison -> delete cmp insn */
1.4660 ++ /* Reset the nonstandard flag */
1.4661 ++ cc_status.mdep.fpvalue = SET_SRC (exp);
1.4662 ++ cc_status.mdep.fpflags = CC_SET_CZ;
1.4663 ++ }
1.4664 ++ break;
1.4665 ++ case CC_FROM_FPCC:
1.4666 ++ /* Flags are updated with flags from Floating-point coprocessor, set
1.4667 ++ CC_NOT_SIGNED flag since the flags are set so that unsigned
1.4668 ++         condition codes can be used directly. */
1.4669 ++ CC_STATUS_INIT;
1.4670 ++ cc_status.flags = CC_NOT_SIGNED;
1.4671 ++ cc_status.mdep.value = cc_status.mdep.fpvalue;
1.4672 ++ cc_status.mdep.flags = cc_status.mdep.fpflags;
1.4673 ++ break;
1.4674 ++ case CC_BLD:
1.4675 ++ /* Bit load is kind of like an inverted testsi, because the Z flag is
1.4676 ++ inverted */
1.4677 ++ CC_STATUS_INIT;
1.4678 ++ cc_status.flags = CC_INVERTED;
1.4679 ++ cc_status.mdep.value = SET_SRC (exp);
1.4680 ++ cc_status.mdep.flags = CC_SET_Z;
1.4681 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4682 ++ break;
1.4683 ++ case CC_NONE:
1.4684 ++ /* Insn does not affect CC at all. Check if the instruction updates
1.4685 ++ some of the register currently reflected in cc0 */
1.4686 ++
1.4687 ++ if ((GET_CODE (exp) == SET)
1.4688 ++ && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
1.4689 ++ && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
1.4690 ++ || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
1.4691 ++ || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
1.4692 ++ {
1.4693 ++ CC_STATUS_INIT;
1.4694 ++ }
1.4695 ++
1.4696 ++ /* If this is a parallel we must step through each of the parallel
1.4697 ++ expressions */
1.4698 ++ if (GET_CODE (exp) == PARALLEL)
1.4699 ++ {
1.4700 ++ int i;
1.4701 ++ for (i = 0; i < XVECLEN (exp, 0); ++i)
1.4702 ++ {
1.4703 ++ rtx vec_exp = XVECEXP (exp, 0, i);
1.4704 ++ if ((GET_CODE (vec_exp) == SET)
1.4705 ++ && (cc_status.value1 || cc_status.value2
1.4706 ++ || cc_status.mdep.value)
1.4707 ++ && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
1.4708 ++ || reg_mentioned_p (SET_DEST (vec_exp),
1.4709 ++ cc_status.value2)
1.4710 ++ || reg_mentioned_p (SET_DEST (vec_exp),
1.4711 ++ cc_status.mdep.value)))
1.4712 ++ {
1.4713 ++ CC_STATUS_INIT;
1.4714 ++ }
1.4715 ++ }
1.4716 ++ }
1.4717 ++
1.4718 ++      /* Check if we have memory operations with post_inc or pre_dec on the
1.4719 ++ register currently reflected in cc0 */
1.4720 ++ if (GET_CODE (exp) == SET
1.4721 ++ && GET_CODE (SET_SRC (exp)) == MEM
1.4722 ++ && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
1.4723 ++ || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
1.4724 ++ &&
1.4725 ++ (reg_mentioned_p
1.4726 ++ (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
1.4727 ++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
1.4728 ++ cc_status.value2)
1.4729 ++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
1.4730 ++ cc_status.mdep.value)))
1.4731 ++ CC_STATUS_INIT;
1.4732 ++
1.4733 ++ if (GET_CODE (exp) == SET
1.4734 ++ && GET_CODE (SET_DEST (exp)) == MEM
1.4735 ++ && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
1.4736 ++ || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
1.4737 ++ &&
1.4738 ++ (reg_mentioned_p
1.4739 ++ (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
1.4740 ++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
1.4741 ++ cc_status.value2)
1.4742 ++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
1.4743 ++ cc_status.mdep.value)))
1.4744 ++ CC_STATUS_INIT;
1.4745 ++ break;
1.4746 ++
1.4747 ++ case CC_SET_VNCZ:
1.4748 ++ CC_STATUS_INIT;
1.4749 ++ cc_status.mdep.value = recog_data.operand[0];
1.4750 ++ cc_status.mdep.flags = CC_SET_VNCZ;
1.4751 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4752 ++ break;
1.4753 ++
1.4754 ++ case CC_SET_NCZ:
1.4755 ++ CC_STATUS_INIT;
1.4756 ++ cc_status.mdep.value = recog_data.operand[0];
1.4757 ++ cc_status.mdep.flags = CC_SET_NCZ;
1.4758 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4759 ++ break;
1.4760 ++
1.4761 ++ case CC_SET_CZ:
1.4762 ++ CC_STATUS_INIT;
1.4763 ++ cc_status.mdep.value = recog_data.operand[0];
1.4764 ++ cc_status.mdep.flags = CC_SET_CZ;
1.4765 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4766 ++ break;
1.4767 ++
1.4768 ++ case CC_SET_Z:
1.4769 ++ CC_STATUS_INIT;
1.4770 ++ cc_status.mdep.value = recog_data.operand[0];
1.4771 ++ cc_status.mdep.flags = CC_SET_Z;
1.4772 ++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
1.4773 ++ break;
1.4774 ++
1.4775 ++ case CC_CLOBBER:
1.4776 ++ CC_STATUS_INIT;
1.4777 ++ break;
1.4778 ++
1.4779 ++ default:
1.4780 ++ CC_STATUS_INIT;
1.4781 ++ }
1.4782 ++}
1.4783 ++
1.4784 ++
1.4785 ++/*
1.4786 ++ Outputs to stdio stream stream the assembler syntax for an instruction
1.4787 ++ operand x. x is an RTL expression.
1.4788 ++*/
1.4789 ++void
1.4790 ++avr32_print_operand (FILE * stream, rtx x, int code)
1.4791 ++{
1.4792 ++ int error = 0;
1.4793 ++
1.4794 ++ if ( code == '?' )
1.4795 ++ {
1.4796 ++ /* Predicable instruction, print condition code */
1.4797 ++
1.4798 ++ /* If the insn should not be conditional then do nothing. */
1.4799 ++ if ( current_insn_predicate == NULL_RTX )
1.4800 ++ return;
1.4801 ++
1.4802 ++ /* Set x to the predicate to force printing
1.4803 ++ the condition later on. */
1.4804 ++ x = current_insn_predicate;
1.4805 ++
1.4806 ++      /* Reverse condition if using bld insn. */
1.4807 ++ if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
1.4808 ++ x = reversed_condition (current_insn_predicate);
1.4809 ++ }
1.4810 ++ else if ( code == '!' )
1.4811 ++ {
1.4812 ++ /* Output compare for conditional insn if needed. */
1.4813 ++ rtx new_cond;
1.4814 ++ gcc_assert ( current_insn_predicate != NULL_RTX );
1.4815 ++ new_cond = avr32_output_cmp(current_insn_predicate,
1.4816 ++ GET_MODE(XEXP(current_insn_predicate,0)),
1.4817 ++ XEXP(current_insn_predicate,0),
1.4818 ++ XEXP(current_insn_predicate,1));
1.4819 ++
1.4820 ++ /* Check if the new condition is a special avr32 condition
1.4821 ++ specified using UNSPECs. If so we must handle it differently. */
1.4822 ++ if ( GET_CODE (new_cond) == UNSPEC )
1.4823 ++ {
1.4824 ++ current_insn_predicate =
1.4825 ++ gen_rtx_UNSPEC (CCmode,
1.4826 ++ gen_rtvec (2,
1.4827 ++ XEXP(current_insn_predicate,0),
1.4828 ++ XEXP(current_insn_predicate,1)),
1.4829 ++ XINT (new_cond, 1));
1.4830 ++ }
1.4831 ++ else
1.4832 ++ {
1.4833 ++ PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
1.4834 ++ }
1.4835 ++ return;
1.4836 ++ }
1.4837 ++
1.4838 ++ switch (GET_CODE (x))
1.4839 ++ {
1.4840 ++ case UNSPEC:
1.4841 ++ switch (XINT (x, 1))
1.4842 ++ {
1.4843 ++ case UNSPEC_COND_PL:
1.4844 ++ if (code == 'i')
1.4845 ++ fputs ("mi", stream);
1.4846 ++ else
1.4847 ++ fputs ("pl", stream);
1.4848 ++ break;
1.4849 ++ case UNSPEC_COND_MI:
1.4850 ++ if (code == 'i')
1.4851 ++ fputs ("pl", stream);
1.4852 ++ else
1.4853 ++ fputs ("mi", stream);
1.4854 ++ break;
1.4855 ++ default:
1.4856 ++ error = 1;
1.4857 ++ }
1.4858 ++ break;
1.4859 ++ case EQ:
1.4860 ++ if (code == 'i')
1.4861 ++ fputs ("ne", stream);
1.4862 ++ else
1.4863 ++ fputs ("eq", stream);
1.4864 ++ break;
1.4865 ++ case NE:
1.4866 ++ if (code == 'i')
1.4867 ++ fputs ("eq", stream);
1.4868 ++ else
1.4869 ++ fputs ("ne", stream);
1.4870 ++ break;
1.4871 ++ case GT:
1.4872 ++ if (code == 'i')
1.4873 ++ fputs ("le", stream);
1.4874 ++ else
1.4875 ++ fputs ("gt", stream);
1.4876 ++ break;
1.4877 ++ case GTU:
1.4878 ++ if (code == 'i')
1.4879 ++ fputs ("ls", stream);
1.4880 ++ else
1.4881 ++ fputs ("hi", stream);
1.4882 ++ break;
1.4883 ++ case LT:
1.4884 ++ if (code == 'i')
1.4885 ++ fputs ("ge", stream);
1.4886 ++ else
1.4887 ++ fputs ("lt", stream);
1.4888 ++ break;
1.4889 ++ case LTU:
1.4890 ++ if (code == 'i')
1.4891 ++ fputs ("hs", stream);
1.4892 ++ else
1.4893 ++ fputs ("lo", stream);
1.4894 ++ break;
1.4895 ++ case GE:
1.4896 ++ if (code == 'i')
1.4897 ++ fputs ("lt", stream);
1.4898 ++ else
1.4899 ++ fputs ("ge", stream);
1.4900 ++ break;
1.4901 ++ case GEU:
1.4902 ++ if (code == 'i')
1.4903 ++ fputs ("lo", stream);
1.4904 ++ else
1.4905 ++ fputs ("hs", stream);
1.4906 ++ break;
1.4907 ++ case LE:
1.4908 ++ if (code == 'i')
1.4909 ++ fputs ("gt", stream);
1.4910 ++ else
1.4911 ++ fputs ("le", stream);
1.4912 ++ break;
1.4913 ++ case LEU:
1.4914 ++ if (code == 'i')
1.4915 ++ fputs ("hi", stream);
1.4916 ++ else
1.4917 ++ fputs ("ls", stream);
1.4918 ++ break;
1.4919 ++ case CONST_INT:
1.4920 ++ {
1.4921 ++ HOST_WIDE_INT value = INTVAL (x);
1.4922 ++
1.4923 ++ switch (code)
1.4924 ++ {
1.4925 ++ case 'm':
1.4926 ++ if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
1.4927 ++ {
1.4928 ++ /* A const_int can be used to represent DImode constants. */
1.4929 ++ value >>= BITS_PER_WORD;
1.4930 ++ }
1.4931 ++ /* We might get a const_int immediate for setting a DI register,
1.4932 ++             we must then return the correct sign-extended DI. The most
1.4933 ++ significant word is just a sign extension. */
1.4934 ++ else if (value < 0)
1.4935 ++ value = -1;
1.4936 ++ else
1.4937 ++ value = 0;
1.4938 ++ break;
1.4939 ++ case 'i':
1.4940 ++ value++;
1.4941 ++ break;
1.4942 ++ case 'p':
1.4943 ++ {
1.4944 ++ /* Set to bit position of first bit set in immediate */
1.4945 ++ int i, bitpos = 32;
1.4946 ++ for (i = 0; i < 32; i++)
1.4947 ++ if (value & (1 << i))
1.4948 ++ {
1.4949 ++ bitpos = i;
1.4950 ++ break;
1.4951 ++ }
1.4952 ++ value = bitpos;
1.4953 ++ }
1.4954 ++ break;
1.4955 ++ case 'z':
1.4956 ++ {
1.4957 ++ /* Set to bit position of first bit cleared in immediate */
1.4958 ++ int i, bitpos = 32;
1.4959 ++ for (i = 0; i < 32; i++)
1.4960 ++ if (!(value & (1 << i)))
1.4961 ++ {
1.4962 ++ bitpos = i;
1.4963 ++ break;
1.4964 ++ }
1.4965 ++ value = bitpos;
1.4966 ++ }
1.4967 ++ break;
1.4968 ++ case 'r':
1.4969 ++ {
1.4970 ++ /* Reglist 8 */
1.4971 ++ char op[50];
1.4972 ++ op[0] = '\0';
1.4973 ++
1.4974 ++ if (value & 0x01)
1.4975 ++ sprintf (op, "r0-r3");
1.4976 ++ if (value & 0x02)
1.4977 ++ strlen (op) ? sprintf (op, "%s, r4-r7", op) : sprintf (op,
1.4978 ++ "r4-r7");
1.4979 ++ if (value & 0x04)
1.4980 ++ strlen (op) ? sprintf (op, "%s, r8-r9", op) : sprintf (op,
1.4981 ++ "r8-r9");
1.4982 ++ if (value & 0x08)
1.4983 ++ strlen (op) ? sprintf (op, "%s, r10", op) : sprintf (op,
1.4984 ++ "r10");
1.4985 ++ if (value & 0x10)
1.4986 ++ strlen (op) ? sprintf (op, "%s, r11", op) : sprintf (op,
1.4987 ++ "r11");
1.4988 ++ if (value & 0x20)
1.4989 ++ strlen (op) ? sprintf (op, "%s, r12", op) : sprintf (op,
1.4990 ++ "r12");
1.4991 ++ if (value & 0x40)
1.4992 ++ strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr");
1.4993 ++ if (value & 0x80)
1.4994 ++ strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc");
1.4995 ++
1.4996 ++ fputs (op, stream);
1.4997 ++ return;
1.4998 ++ }
1.4999 ++ case 's':
1.5000 ++ {
1.5001 ++ /* Reglist 16 */
1.5002 ++ char reglist16_string[100];
1.5003 ++ int i;
1.5004 ++ reglist16_string[0] = '\0';
1.5005 ++
1.5006 ++ for (i = 0; i < 16; ++i)
1.5007 ++ {
1.5008 ++ if (value & (1 << i))
1.5009 ++ {
1.5010 ++ strlen (reglist16_string) ? sprintf (reglist16_string,
1.5011 ++ "%s, %s",
1.5012 ++ reglist16_string,
1.5013 ++ reg_names
1.5014 ++ [INTERNAL_REGNUM
1.5015 ++ (i)]) :
1.5016 ++ sprintf (reglist16_string, "%s",
1.5017 ++ reg_names[INTERNAL_REGNUM (i)]);
1.5018 ++ }
1.5019 ++ }
1.5020 ++ fputs (reglist16_string, stream);
1.5021 ++ return;
1.5022 ++ }
1.5023 ++ case 'C':
1.5024 ++ {
1.5025 ++ /* RegListCP8 */
1.5026 ++ char reglist_string[100];
1.5027 ++ avr32_make_fp_reglist_w (value, (char *) reglist_string);
1.5028 ++ fputs (reglist_string, stream);
1.5029 ++ return;
1.5030 ++ }
1.5031 ++ case 'D':
1.5032 ++ {
1.5033 ++ /* RegListCPD8 */
1.5034 ++ char reglist_string[100];
1.5035 ++ avr32_make_fp_reglist_d (value, (char *) reglist_string);
1.5036 ++ fputs (reglist_string, stream);
1.5037 ++ return;
1.5038 ++ }
1.5039 ++ case 'h':
1.5040 ++ /* Print halfword part of word */
1.5041 ++ fputs (value ? "b" : "t", stream);
1.5042 ++ return;
1.5043 ++ }
1.5044 ++
1.5045 ++ /* Print Value */
1.5046 ++ fprintf (stream, "%d", value);
1.5047 ++ break;
1.5048 ++ }
1.5049 ++ case CONST_DOUBLE:
1.5050 ++ {
1.5051 ++ HOST_WIDE_INT hi, lo;
1.5052 ++ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
1.5053 ++ {
1.5054 ++ HOST_WIDE_INT target_float[2];
1.5055 ++ hi = lo = 0;
1.5056 ++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
1.5057 ++ GET_MODE (x));
1.5058 ++ /* For doubles the most significant part starts at index 0. */
1.5059 ++ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
1.5060 ++ {
1.5061 ++ hi = target_float[0];
1.5062 ++ lo = target_float[1];
1.5063 ++ }
1.5064 ++ else
1.5065 ++ {
1.5066 ++ lo = target_float[0];
1.5067 ++ }
1.5068 ++ }
1.5069 ++ else
1.5070 ++ {
1.5071 ++ hi = CONST_DOUBLE_HIGH (x);
1.5072 ++ lo = CONST_DOUBLE_LOW (x);
1.5073 ++ }
1.5074 ++
1.5075 ++ if (code == 'm')
1.5076 ++ fprintf (stream, "%ld", hi);
1.5077 ++ else
1.5078 ++ fprintf (stream, "%ld", lo);
1.5079 ++
1.5080 ++ break;
1.5081 ++ }
1.5082 ++ case CONST:
1.5083 ++ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
1.5084 ++ fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
1.5085 ++ break;
1.5086 ++ case REG:
1.5087 ++ /* Swap register name if the register is DImode or DFmode. */
1.5088 ++ if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
1.5089 ++ {
1.5090 ++ /* Double register must have an even numbered address */
1.5091 ++ gcc_assert (!(REGNO (x) % 2));
1.5092 ++ if (code == 'm')
1.5093 ++ fputs (reg_names[true_regnum (x)], stream);
1.5094 ++ else
1.5095 ++ fputs (reg_names[true_regnum (x) + 1], stream);
1.5096 ++ }
1.5097 ++ else if (GET_MODE (x) == TImode)
1.5098 ++ {
1.5099 ++ switch (code)
1.5100 ++ {
1.5101 ++ case 'T':
1.5102 ++ fputs (reg_names[true_regnum (x)], stream);
1.5103 ++ break;
1.5104 ++ case 'U':
1.5105 ++ fputs (reg_names[true_regnum (x) + 1], stream);
1.5106 ++ break;
1.5107 ++ case 'L':
1.5108 ++ fputs (reg_names[true_regnum (x) + 2], stream);
1.5109 ++ break;
1.5110 ++ case 'B':
1.5111 ++ fputs (reg_names[true_regnum (x) + 3], stream);
1.5112 ++ break;
1.5113 ++ default:
1.5114 ++ fprintf (stream, "%s, %s, %s, %s",
1.5115 ++ reg_names[true_regnum (x) + 3],
1.5116 ++ reg_names[true_regnum (x) + 2],
1.5117 ++ reg_names[true_regnum (x) + 1],
1.5118 ++ reg_names[true_regnum (x)]);
1.5119 ++ break;
1.5120 ++ }
1.5121 ++ }
1.5122 ++ else
1.5123 ++ {
1.5124 ++ fputs (reg_names[true_regnum (x)], stream);
1.5125 ++ }
1.5126 ++ break;
1.5127 ++ case CODE_LABEL:
1.5128 ++ case LABEL_REF:
1.5129 ++ case SYMBOL_REF:
1.5130 ++ output_addr_const (stream, x);
1.5131 ++ break;
1.5132 ++ case MEM:
1.5133 ++ switch (GET_CODE (XEXP (x, 0)))
1.5134 ++ {
1.5135 ++ case LABEL_REF:
1.5136 ++ case SYMBOL_REF:
1.5137 ++ output_addr_const (stream, XEXP (x, 0));
1.5138 ++ break;
1.5139 ++ case MEM:
1.5140 ++ switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
1.5141 ++ {
1.5142 ++ case SYMBOL_REF:
1.5143 ++ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
1.5144 ++ break;
1.5145 ++ default:
1.5146 ++ error = 1;
1.5147 ++ break;
1.5148 ++ }
1.5149 ++ break;
1.5150 ++ case REG:
1.5151 ++ avr32_print_operand (stream, XEXP (x, 0), 0);
1.5152 ++ if (code != 'p')
1.5153 ++ fputs ("[0]", stream);
1.5154 ++ break;
1.5155 ++ case PRE_DEC:
1.5156 ++ fputs ("--", stream);
1.5157 ++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
1.5158 ++ break;
1.5159 ++ case POST_INC:
1.5160 ++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
1.5161 ++ fputs ("++", stream);
1.5162 ++ break;
1.5163 ++ case PLUS:
1.5164 ++ {
1.5165 ++ rtx op0 = XEXP (XEXP (x, 0), 0);
1.5166 ++ rtx op1 = XEXP (XEXP (x, 0), 1);
1.5167 ++ rtx base = NULL_RTX, offset = NULL_RTX;
1.5168 ++
1.5169 ++ if (avr32_address_register_rtx_p (op0, 1))
1.5170 ++ {
1.5171 ++ base = op0;
1.5172 ++ offset = op1;
1.5173 ++ }
1.5174 ++ else if (avr32_address_register_rtx_p (op1, 1))
1.5175 ++ {
1.5176 ++ /* Operands are switched. */
1.5177 ++ base = op1;
1.5178 ++ offset = op0;
1.5179 ++ }
1.5180 ++
1.5181 ++ gcc_assert (base && offset
1.5182 ++ && avr32_address_register_rtx_p (base, 1)
1.5183 ++ && avr32_legitimate_index_p (GET_MODE (x), offset,
1.5184 ++ 1));
1.5185 ++
1.5186 ++ avr32_print_operand (stream, base, 0);
1.5187 ++ fputs ("[", stream);
1.5188 ++ avr32_print_operand (stream, offset, 0);
1.5189 ++ fputs ("]", stream);
1.5190 ++ break;
1.5191 ++ }
1.5192 ++ case CONST:
1.5193 ++ output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
1.5194 ++ fprintf (stream, " + %ld",
1.5195 ++ INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
1.5196 ++ break;
1.5197 ++ case CONST_INT:
1.5198 ++ avr32_print_operand (stream, XEXP (x, 0), 0);
1.5199 ++ break;
1.5200 ++ default:
1.5201 ++ error = 1;
1.5202 ++ }
1.5203 ++ break;
1.5204 ++ case MULT:
1.5205 ++ {
1.5206 ++ int value = INTVAL (XEXP (x, 1));
1.5207 ++
1.5208 ++ /* Convert immediate in multiplication into a shift immediate */
1.5209 ++ switch (value)
1.5210 ++ {
1.5211 ++ case 2:
1.5212 ++ value = 1;
1.5213 ++ break;
1.5214 ++ case 4:
1.5215 ++ value = 2;
1.5216 ++ break;
1.5217 ++ case 8:
1.5218 ++ value = 3;
1.5219 ++ break;
1.5220 ++ default:
1.5221 ++ value = 0;
1.5222 ++ }
1.5223 ++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
1.5224 ++ value);
1.5225 ++ break;
1.5226 ++ }
1.5227 ++ case ASHIFT:
1.5228 ++ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
1.5229 ++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
1.5230 ++ (int) INTVAL (XEXP (x, 1)));
1.5231 ++ else if (REG_P (XEXP (x, 1)))
1.5232 ++ fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
1.5233 ++ reg_names[true_regnum (XEXP (x, 1))]);
1.5234 ++ else
1.5235 ++ {
1.5236 ++ error = 1;
1.5237 ++ }
1.5238 ++ break;
1.5239 ++ case LSHIFTRT:
1.5240 ++ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
1.5241 ++ fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
1.5242 ++ (int) INTVAL (XEXP (x, 1)));
1.5243 ++ else if (REG_P (XEXP (x, 1)))
1.5244 ++ fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
1.5245 ++ reg_names[true_regnum (XEXP (x, 1))]);
1.5246 ++ else
1.5247 ++ {
1.5248 ++ error = 1;
1.5249 ++ }
1.5250 ++ fprintf (stream, ">>");
1.5251 ++ break;
1.5252 ++ case PARALLEL:
1.5253 ++ {
1.5254 ++ /* Load store multiple */
1.5255 ++ int i;
1.5256 ++ int count = XVECLEN (x, 0);
1.5257 ++ int reglist16 = 0;
1.5258 ++ char reglist16_string[100];
1.5259 ++
1.5260 ++ for (i = 0; i < count; ++i)
1.5261 ++ {
1.5262 ++ rtx vec_elm = XVECEXP (x, 0, i);
1.5263 ++ if (GET_MODE (vec_elm) != SET)
1.5264 ++ {
1.5265 ++ debug_rtx (vec_elm);
1.5266 ++ internal_error ("Unknown element in parallel expression!");
1.5267 ++ }
1.5268 ++ if (GET_MODE (XEXP (vec_elm, 0)) == REG)
1.5269 ++ {
1.5270 ++ /* Load multiple */
1.5271 ++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
1.5272 ++ }
1.5273 ++ else
1.5274 ++ {
1.5275 ++ /* Store multiple */
1.5276 ++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
1.5277 ++ }
1.5278 ++ }
1.5279 ++
1.5280 ++ avr32_make_reglist16 (reglist16, reglist16_string);
1.5281 ++ fputs (reglist16_string, stream);
1.5282 ++
1.5283 ++ break;
1.5284 ++ }
1.5285 ++
1.5286 ++ case PLUS:
1.5287 ++ {
1.5288 ++ rtx op0 = XEXP (x, 0);
1.5289 ++ rtx op1 = XEXP (x, 1);
1.5290 ++ rtx base = NULL_RTX, offset = NULL_RTX;
1.5291 ++
1.5292 ++ if (avr32_address_register_rtx_p (op0, 1))
1.5293 ++ {
1.5294 ++ base = op0;
1.5295 ++ offset = op1;
1.5296 ++ }
1.5297 ++ else if (avr32_address_register_rtx_p (op1, 1))
1.5298 ++ {
1.5299 ++ /* Operands are switched. */
1.5300 ++ base = op1;
1.5301 ++ offset = op0;
1.5302 ++ }
1.5303 ++
1.5304 ++ gcc_assert (base && offset
1.5305 ++ && avr32_address_register_rtx_p (base, 1)
1.5306 ++ && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
1.5307 ++
1.5308 ++ avr32_print_operand (stream, base, 0);
1.5309 ++ fputs ("[", stream);
1.5310 ++ avr32_print_operand (stream, offset, 0);
1.5311 ++ fputs ("]", stream);
1.5312 ++ break;
1.5313 ++ }
1.5314 ++
1.5315 ++ default:
1.5316 ++ error = 1;
1.5317 ++ }
1.5318 ++
1.5319 ++ if (error)
1.5320 ++ {
1.5321 ++ debug_rtx (x);
1.5322 ++ internal_error ("Illegal expression for avr32_print_operand");
1.5323 ++ }
1.5324 ++}
1.5325 ++
1.5326 ++rtx
1.5327 ++avr32_get_note_reg_equiv (rtx insn)
1.5328 ++{
1.5329 ++ rtx note;
1.5330 ++
1.5331 ++ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
1.5332 ++
1.5333 ++ if (note != NULL_RTX)
1.5334 ++ return XEXP (note, 0);
1.5335 ++ else
1.5336 ++ return NULL_RTX;
1.5337 ++}
1.5338 ++
1.5339 ++/*
1.5340 ++ Outputs to stdio stream stream the assembler syntax for an instruction
1.5341 ++ operand that is a memory reference whose address is x. x is an RTL
1.5342 ++ expression.
1.5343 ++
1.5344 ++ ToDo: fixme.
1.5345 ++*/
1.5346 ++void
1.5347 ++avr32_print_operand_address (FILE * stream, rtx x)
1.5348 ++{
1.5349 ++ fprintf (stream, "(%d) /* address */", REGNO (x));
1.5350 ++}
1.5351 ++
1.5352 ++/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
1.5353 ++bool
1.5354 ++avr32_got_mentioned_p (rtx addr)
1.5355 ++{
1.5356 ++ if (GET_CODE (addr) == MEM)
1.5357 ++ addr = XEXP (addr, 0);
1.5358 ++ while (GET_CODE (addr) == CONST)
1.5359 ++ addr = XEXP (addr, 0);
1.5360 ++ if (GET_CODE (addr) == SYMBOL_REF)
1.5361 ++ {
1.5362 ++ return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
1.5363 ++ }
1.5364 ++ if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
1.5365 ++ {
1.5366 ++ bool l1, l2;
1.5367 ++
1.5368 ++ l1 = avr32_got_mentioned_p (XEXP (addr, 0));
1.5369 ++ l2 = avr32_got_mentioned_p (XEXP (addr, 1));
1.5370 ++ return l1 || l2;
1.5371 ++ }
1.5372 ++ return false;
1.5373 ++}
1.5374 ++
1.5375 ++
1.5376 ++/* Find the symbol in an address expression. */
1.5377 ++
1.5378 ++rtx
1.5379 ++avr32_find_symbol (rtx addr)
1.5380 ++{
1.5381 ++ if (GET_CODE (addr) == MEM)
1.5382 ++ addr = XEXP (addr, 0);
1.5383 ++
1.5384 ++ while (GET_CODE (addr) == CONST)
1.5385 ++ addr = XEXP (addr, 0);
1.5386 ++
1.5387 ++ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
1.5388 ++ return addr;
1.5389 ++ if (GET_CODE (addr) == PLUS)
1.5390 ++ {
1.5391 ++ rtx l1, l2;
1.5392 ++
1.5393 ++ l1 = avr32_find_symbol (XEXP (addr, 0));
1.5394 ++ l2 = avr32_find_symbol (XEXP (addr, 1));
1.5395 ++ if (l1 != NULL_RTX && l2 == NULL_RTX)
1.5396 ++ return l1;
1.5397 ++ else if (l1 == NULL_RTX && l2 != NULL_RTX)
1.5398 ++ return l2;
1.5399 ++ }
1.5400 ++
1.5401 ++ return NULL_RTX;
1.5402 ++}
1.5403 ++
1.5404 ++
1.5405 ++/* Routines for manipulation of the constant pool. */
1.5406 ++
1.5407 ++/* AVR32 instructions cannot load a large constant directly into a
1.5408 ++ register; they have to come from a pc relative load. The constant
1.5409 ++ must therefore be placed in the addressable range of the pc
1.5410 ++ relative load. Depending on the precise pc relative load
1.5411 ++ instruction the range is somewhere between 256 bytes and 4k. This
1.5412 ++ means that we often have to dump a constant inside a function, and
1.5413 ++ generate code to branch around it.
1.5414 ++
1.5415 ++ It is important to minimize this, since the branches will slow
1.5416 ++ things down and make the code larger.
1.5417 ++
1.5418 ++ Normally we can hide the table after an existing unconditional
1.5419 ++ branch so that there is no interruption of the flow, but in the
1.5420 ++ worst case the code looks like this:
1.5421 ++
1.5422 ++ lddpc rn, L1
1.5423 ++ ...
1.5424 ++ rjmp L2
1.5425 ++ align
1.5426 ++ L1: .long value
1.5427 ++ L2:
1.5428 ++ ...
1.5429 ++
1.5430 ++ lddpc rn, L3
1.5431 ++ ...
1.5432 ++ rjmp L4
1.5433 ++ align
1.5434 ++ L3: .long value
1.5435 ++ L4:
1.5436 ++ ...
1.5437 ++
1.5438 ++ We fix this by performing a scan after scheduling, which notices
1.5439 ++ which instructions need to have their operands fetched from the
1.5440 ++ constant table and builds the table.
1.5441 ++
1.5442 ++ The algorithm starts by building a table of all the constants that
1.5443 ++ need fixing up and all the natural barriers in the function (places
1.5444 ++ where a constant table can be dropped without breaking the flow).
1.5445 ++ For each fixup we note how far the pc-relative replacement will be
1.5446 ++ able to reach and the offset of the instruction into the function.
1.5447 ++
1.5448 ++ Having built the table we then group the fixes together to form
1.5449 ++ tables that are as large as possible (subject to addressing
1.5450 ++ constraints) and emit each table of constants after the last
1.5451 ++ barrier that is within range of all the instructions in the group.
1.5452 ++ If a group does not contain a barrier, then we forcibly create one
1.5453 ++ by inserting a jump instruction into the flow. Once the table has
1.5454 ++ been inserted, the insns are then modified to reference the
1.5455 ++ relevant entry in the pool.
1.5456 ++
1.5457 ++ Possible enhancements to the algorithm (not implemented) are:
1.5458 ++
1.5459 ++ 1) For some processors and object formats, there may be benefit in
1.5460 ++ aligning the pools to the start of cache lines; this alignment
1.5461 ++ would need to be taken into account when calculating addressability
1.5462 ++ of a pool. */
1.5463 ++
1.5464 ++/* These typedefs are located at the start of this file, so that
1.5465 ++ they can be used in the prototypes there. This comment is to
1.5466 ++ remind readers of that fact so that the following structures
1.5467 ++ can be understood more easily.
1.5468 ++
1.5469 ++ typedef struct minipool_node Mnode;
1.5470 ++ typedef struct minipool_fixup Mfix; */
1.5471 ++
1.5472 ++struct minipool_node
1.5473 ++{
1.5474 ++ /* Doubly linked chain of entries. */
1.5475 ++ Mnode *next;
1.5476 ++ Mnode *prev;
1.5477 ++ /* The maximum offset into the code that this entry can be placed. While
1.5478 ++ pushing fixes for forward references, all entries are sorted in order of
1.5479 ++ increasing max_address. */
1.5480 ++ HOST_WIDE_INT max_address;
1.5481 ++ /* Similarly for an entry inserted for a backwards ref. */
1.5482 ++ HOST_WIDE_INT min_address;
1.5483 ++ /* The number of fixes referencing this entry. This can become zero if we
1.5484 ++ "unpush" an entry. In this case we ignore the entry when we come to
1.5485 ++ emit the code. */
1.5486 ++ int refcount;
1.5487 ++ /* The offset from the start of the minipool. */
1.5488 ++ HOST_WIDE_INT offset;
1.5489 ++ /* The value in table. */
1.5490 ++ rtx value;
1.5491 ++ /* The mode of value. */
1.5492 ++ enum machine_mode mode;
1.5493 ++ /* The size of the value. */
1.5494 ++ int fix_size;
1.5495 ++};
1.5496 ++
1.5497 ++struct minipool_fixup
1.5498 ++{
1.5499 ++ Mfix *next;
1.5500 ++ rtx insn;
1.5501 ++ HOST_WIDE_INT address;
1.5502 ++ rtx *loc;
1.5503 ++ enum machine_mode mode;
1.5504 ++ int fix_size;
1.5505 ++ rtx value;
1.5506 ++ Mnode *minipool;
1.5507 ++ HOST_WIDE_INT forwards;
1.5508 ++ HOST_WIDE_INT backwards;
1.5509 ++};
1.5510 ++
1.5511 ++
1.5512 ++/* Fixes less than a word need padding out to a word boundary. */
1.5513 ++#define MINIPOOL_FIX_SIZE(mode, value) \
1.5514 ++ (IS_FORCE_MINIPOOL(value) ? 0 : \
1.5515 ++ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
1.5516 ++
1.5517 ++#define IS_FORCE_MINIPOOL(x) \
1.5518 ++ (GET_CODE(x) == UNSPEC && \
1.5519 ++ XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
1.5520 ++
1.5521 ++static Mnode *minipool_vector_head;
1.5522 ++static Mnode *minipool_vector_tail;
1.5523 ++
1.5524 ++/* The linked list of all minipool fixes required for this function. */
1.5525 ++Mfix *minipool_fix_head;
1.5526 ++Mfix *minipool_fix_tail;
1.5527 ++/* The fix entry for the current minipool, once it has been placed. */
1.5528 ++Mfix *minipool_barrier;
1.5529 ++
1.5530 ++/* Determines if INSN is the start of a jump table. Returns the end
1.5531 ++ of the TABLE or NULL_RTX. */
1.5532 ++static rtx
1.5533 ++is_jump_table (rtx insn)
1.5534 ++{
1.5535 ++ rtx table;
1.5536 ++
1.5537 ++ if (GET_CODE (insn) == JUMP_INSN
1.5538 ++ && JUMP_LABEL (insn) != NULL
1.5539 ++ && ((table = next_real_insn (JUMP_LABEL (insn)))
1.5540 ++ == next_real_insn (insn))
1.5541 ++ && table != NULL
1.5542 ++ && GET_CODE (table) == JUMP_INSN
1.5543 ++ && (GET_CODE (PATTERN (table)) == ADDR_VEC
1.5544 ++ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
1.5545 ++ return table;
1.5546 ++
1.5547 ++ return NULL_RTX;
1.5548 ++}
1.5549 ++
1.5550 ++static HOST_WIDE_INT
1.5551 ++get_jump_table_size (rtx insn)
1.5552 ++{
1.5553 ++  /* ADDR_VECs only take room if read-only data goes into the text section. */
1.5554 ++ if (JUMP_TABLES_IN_TEXT_SECTION
1.5555 ++#if !defined(READONLY_DATA_SECTION_ASM_OP)
1.5556 ++ || 1
1.5557 ++#endif
1.5558 ++ )
1.5559 ++ {
1.5560 ++ rtx body = PATTERN (insn);
1.5561 ++ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
1.5562 ++
1.5563 ++ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
1.5564 ++ }
1.5565 ++
1.5566 ++ return 0;
1.5567 ++}
1.5568 ++
1.5569 ++/* Move a minipool fix MP from its current location to before MAX_MP.
1.5570 ++ If MAX_MP is NULL, then MP doesn't need moving, but the addressing
1.5571 ++ constraints may need updating. */
1.5572 ++static Mnode *
1.5573 ++move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
1.5574 ++ HOST_WIDE_INT max_address)
1.5575 ++{
1.5576 ++ /* This should never be true and the code below assumes these are
1.5577 ++ different. */
1.5578 ++ if (mp == max_mp)
1.5579 ++ abort ();
1.5580 ++
1.5581 ++ if (max_mp == NULL)
1.5582 ++ {
1.5583 ++ if (max_address < mp->max_address)
1.5584 ++ mp->max_address = max_address;
1.5585 ++ }
1.5586 ++ else
1.5587 ++ {
1.5588 ++ if (max_address > max_mp->max_address - mp->fix_size)
1.5589 ++ mp->max_address = max_mp->max_address - mp->fix_size;
1.5590 ++ else
1.5591 ++ mp->max_address = max_address;
1.5592 ++
1.5593 ++ /* Unlink MP from its current position. Since max_mp is non-null,
1.5594 ++ mp->prev must be non-null. */
1.5595 ++ mp->prev->next = mp->next;
1.5596 ++ if (mp->next != NULL)
1.5597 ++ mp->next->prev = mp->prev;
1.5598 ++ else
1.5599 ++ minipool_vector_tail = mp->prev;
1.5600 ++
1.5601 ++ /* Re-insert it before MAX_MP. */
1.5602 ++ mp->next = max_mp;
1.5603 ++ mp->prev = max_mp->prev;
1.5604 ++ max_mp->prev = mp;
1.5605 ++
1.5606 ++ if (mp->prev != NULL)
1.5607 ++ mp->prev->next = mp;
1.5608 ++ else
1.5609 ++ minipool_vector_head = mp;
1.5610 ++ }
1.5611 ++
1.5612 ++ /* Save the new entry. */
1.5613 ++ max_mp = mp;
1.5614 ++
1.5615 ++ /* Scan over the preceding entries and adjust their addresses as required.
1.5616 ++ */
1.5617 ++ while (mp->prev != NULL
1.5618 ++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
1.5619 ++ {
1.5620 ++ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
1.5621 ++ mp = mp->prev;
1.5622 ++ }
1.5623 ++
1.5624 ++ return max_mp;
1.5625 ++}
1.5626 ++
1.5627 ++/* Add a constant to the minipool for a forward reference. Returns the
1.5628 ++ node added or NULL if the constant will not fit in this pool. */
1.5629 ++static Mnode *
1.5630 ++add_minipool_forward_ref (Mfix * fix)
1.5631 ++{
1.5632 ++ /* If set, max_mp is the first pool_entry that has a lower constraint than
1.5633 ++ the one we are trying to add. */
1.5634 ++ Mnode *max_mp = NULL;
1.5635 ++ HOST_WIDE_INT max_address = fix->address + fix->forwards;
1.5636 ++ Mnode *mp;
1.5637 ++
1.5638 ++ /* If this fix's address is greater than the address of the first entry,
1.5639 ++ then we can't put the fix in this pool. We subtract the size of the
1.5640 ++ current fix to ensure that if the table is fully packed we still have
1.5641 ++ enough room to insert this value by shuffling the other fixes forwards. */
1.5642 ++ if (minipool_vector_head &&
1.5643 ++ fix->address >= minipool_vector_head->max_address - fix->fix_size)
1.5644 ++ return NULL;
1.5645 ++
1.5646 ++ /* Scan the pool to see if a constant with the same value has already been
1.5647 ++ added. While we are doing this, also note the location where we must
1.5648 ++ insert the constant if it doesn't already exist. */
1.5649 ++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
1.5650 ++ {
1.5651 ++ if (GET_CODE (fix->value) == GET_CODE (mp->value)
1.5652 ++ && fix->mode == mp->mode
1.5653 ++ && (GET_CODE (fix->value) != CODE_LABEL
1.5654 ++ || (CODE_LABEL_NUMBER (fix->value)
1.5655 ++ == CODE_LABEL_NUMBER (mp->value)))
1.5656 ++ && rtx_equal_p (fix->value, mp->value))
1.5657 ++ {
1.5658 ++ /* More than one fix references this entry. */
1.5659 ++ mp->refcount++;
1.5660 ++ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
1.5661 ++ }
1.5662 ++
1.5663 ++ /* Note the insertion point if necessary. */
1.5664 ++ if (max_mp == NULL && mp->max_address > max_address)
1.5665 ++ max_mp = mp;
1.5666 ++
1.5667 ++ }
1.5668 ++
1.5669 ++ /* The value is not currently in the minipool, so we need to create a new
1.5670 ++ entry for it. If MAX_MP is NULL, the entry will be put on the end of
1.5671 ++ the list since the placement is less constrained than any existing
1.5672 ++ entry. Otherwise, we insert the new fix before MAX_MP and, if
1.5673 ++ necessary, adjust the constraints on the other entries. */
1.5674 ++ mp = xmalloc (sizeof (*mp));
1.5675 ++ mp->fix_size = fix->fix_size;
1.5676 ++ mp->mode = fix->mode;
1.5677 ++ mp->value = fix->value;
1.5678 ++ mp->refcount = 1;
1.5679 ++ /* Not yet required for a backwards ref. */
1.5680 ++ mp->min_address = -65536;
1.5681 ++
1.5682 ++ if (max_mp == NULL)
1.5683 ++ {
1.5684 ++ mp->max_address = max_address;
1.5685 ++ mp->next = NULL;
1.5686 ++ mp->prev = minipool_vector_tail;
1.5687 ++
1.5688 ++ if (mp->prev == NULL)
1.5689 ++ {
1.5690 ++ minipool_vector_head = mp;
1.5691 ++ minipool_vector_label = gen_label_rtx ();
1.5692 ++ }
1.5693 ++ else
1.5694 ++ mp->prev->next = mp;
1.5695 ++
1.5696 ++ minipool_vector_tail = mp;
1.5697 ++ }
1.5698 ++ else
1.5699 ++ {
1.5700 ++ if (max_address > max_mp->max_address - mp->fix_size)
1.5701 ++ mp->max_address = max_mp->max_address - mp->fix_size;
1.5702 ++ else
1.5703 ++ mp->max_address = max_address;
1.5704 ++
1.5705 ++ mp->next = max_mp;
1.5706 ++ mp->prev = max_mp->prev;
1.5707 ++ max_mp->prev = mp;
1.5708 ++ if (mp->prev != NULL)
1.5709 ++ mp->prev->next = mp;
1.5710 ++ else
1.5711 ++ minipool_vector_head = mp;
1.5712 ++ }
1.5713 ++
1.5714 ++ /* Save the new entry. */
1.5715 ++ max_mp = mp;
1.5716 ++
1.5717 ++ /* Scan over the preceding entries and adjust their addresses as required.
1.5718 ++ */
1.5719 ++ while (mp->prev != NULL
1.5720 ++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
1.5721 ++ {
1.5722 ++ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
1.5723 ++ mp = mp->prev;
1.5724 ++ }
1.5725 ++
1.5726 ++ return max_mp;
1.5727 ++}
1.5728 ++
1.5729 ++static Mnode *
1.5730 ++move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
1.5731 ++ HOST_WIDE_INT min_address)
1.5732 ++{
1.5733 ++ HOST_WIDE_INT offset;
1.5734 ++
1.5735 ++ /* This should never be true, and the code below assumes these are
1.5736 ++ different. */
1.5737 ++ if (mp == min_mp)
1.5738 ++ abort ();
1.5739 ++
1.5740 ++ if (min_mp == NULL)
1.5741 ++ {
1.5742 ++ if (min_address > mp->min_address)
1.5743 ++ mp->min_address = min_address;
1.5744 ++ }
1.5745 ++ else
1.5746 ++ {
1.5747 ++ /* We will adjust this below if it is too loose. */
1.5748 ++ mp->min_address = min_address;
1.5749 ++
1.5750 ++ /* Unlink MP from its current position. Since min_mp is non-null,
1.5751 ++ mp->next must be non-null. */
1.5752 ++ mp->next->prev = mp->prev;
1.5753 ++ if (mp->prev != NULL)
1.5754 ++ mp->prev->next = mp->next;
1.5755 ++ else
1.5756 ++ minipool_vector_head = mp->next;
1.5757 ++
1.5758 ++ /* Reinsert it after MIN_MP. */
1.5759 ++ mp->prev = min_mp;
1.5760 ++ mp->next = min_mp->next;
1.5761 ++ min_mp->next = mp;
1.5762 ++ if (mp->next != NULL)
1.5763 ++ mp->next->prev = mp;
1.5764 ++ else
1.5765 ++ minipool_vector_tail = mp;
1.5766 ++ }
1.5767 ++
1.5768 ++ min_mp = mp;
1.5769 ++
1.5770 ++ offset = 0;
1.5771 ++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
1.5772 ++ {
1.5773 ++ mp->offset = offset;
1.5774 ++ if (mp->refcount > 0)
1.5775 ++ offset += mp->fix_size;
1.5776 ++
1.5777 ++ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
1.5778 ++ mp->next->min_address = mp->min_address + mp->fix_size;
1.5779 ++ }
1.5780 ++
1.5781 ++ return min_mp;
1.5782 ++}
1.5783 ++
1.5784 ++/* Add a constant to the minipool for a backward reference. Returns the
1.5785 ++ node added or NULL if the constant will not fit in this pool.
1.5786 ++
1.5787 ++ Note that the code for insertion for a backwards reference can be
1.5788 ++ somewhat confusing because the calculated offsets for each fix do
1.5789 ++ not take into account the size of the pool (which is still under
1.5790 ++ construction). */
1.5791 ++static Mnode *
1.5792 ++add_minipool_backward_ref (Mfix * fix)
1.5793 ++{
1.5794 ++ /* If set, min_mp is the last pool_entry that has a lower constraint than
1.5795 ++ the one we are trying to add. */
1.5796 ++ Mnode *min_mp = NULL;
1.5797 ++ /* This can be negative, since it is only a constraint. */
1.5798 ++ HOST_WIDE_INT min_address = fix->address - fix->backwards;
1.5799 ++ Mnode *mp;
1.5800 ++
1.5801 ++ /* If we can't reach the current pool from this insn, or if we can't insert
1.5802 ++ this entry at the end of the pool without pushing other fixes out of
1.5803 ++ range, then we don't try. This ensures that we can't fail later on. */
1.5804 ++ if (min_address >= minipool_barrier->address
1.5805 ++ || (minipool_vector_tail->min_address + fix->fix_size
1.5806 ++ >= minipool_barrier->address))
1.5807 ++ return NULL;
1.5808 ++
1.5809 ++ /* Scan the pool to see if a constant with the same value has already been
1.5810 ++ added. While we are doing this, also note the location where we must
1.5811 ++ insert the constant if it doesn't already exist. */
1.5812 ++ for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
1.5813 ++ {
1.5814 ++ if (GET_CODE (fix->value) == GET_CODE (mp->value)
1.5815 ++ && fix->mode == mp->mode
1.5816 ++ && (GET_CODE (fix->value) != CODE_LABEL
1.5817 ++ || (CODE_LABEL_NUMBER (fix->value)
1.5818 ++ == CODE_LABEL_NUMBER (mp->value)))
1.5819 ++ && rtx_equal_p (fix->value, mp->value)
1.5820 ++ /* Check that there is enough slack to move this entry to the end
1.5821 ++ of the table (this is conservative). */
1.5822 ++ && (mp->max_address
1.5823 ++ > (minipool_barrier->address
1.5824 ++ + minipool_vector_tail->offset
1.5825 ++ + minipool_vector_tail->fix_size)))
1.5826 ++ {
1.5827 ++ mp->refcount++;
1.5828 ++ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
1.5829 ++ }
1.5830 ++
1.5831 ++ if (min_mp != NULL)
1.5832 ++ mp->min_address += fix->fix_size;
1.5833 ++ else
1.5834 ++ {
1.5835 ++ /* Note the insertion point if necessary. */
1.5836 ++ if (mp->min_address < min_address)
1.5837 ++ {
1.5838 ++ min_mp = mp;
1.5839 ++ }
1.5840 ++ else if (mp->max_address
1.5841 ++ < minipool_barrier->address + mp->offset + fix->fix_size)
1.5842 ++ {
1.5843 ++ /* Inserting before this entry would push the fix beyond its
1.5844 ++ maximum address (which can happen if we have re-located a
1.5845 ++ forwards fix); force the new fix to come after it. */
1.5846 ++ min_mp = mp;
1.5847 ++ min_address = mp->min_address + fix->fix_size;
1.5848 ++ }
1.5849 ++ }
1.5850 ++ }
1.5851 ++
1.5852 ++ /* We need to create a new entry. */
1.5853 ++ mp = xmalloc (sizeof (*mp));
1.5854 ++ mp->fix_size = fix->fix_size;
1.5855 ++ mp->mode = fix->mode;
1.5856 ++ mp->value = fix->value;
1.5857 ++ mp->refcount = 1;
1.5858 ++ mp->max_address = minipool_barrier->address + 65536;
1.5859 ++
1.5860 ++ mp->min_address = min_address;
1.5861 ++
1.5862 ++ if (min_mp == NULL)
1.5863 ++ {
1.5864 ++ mp->prev = NULL;
1.5865 ++ mp->next = minipool_vector_head;
1.5866 ++
1.5867 ++ if (mp->next == NULL)
1.5868 ++ {
1.5869 ++ minipool_vector_tail = mp;
1.5870 ++ minipool_vector_label = gen_label_rtx ();
1.5871 ++ }
1.5872 ++ else
1.5873 ++ mp->next->prev = mp;
1.5874 ++
1.5875 ++ minipool_vector_head = mp;
1.5876 ++ }
1.5877 ++ else
1.5878 ++ {
1.5879 ++ mp->next = min_mp->next;
1.5880 ++ mp->prev = min_mp;
1.5881 ++ min_mp->next = mp;
1.5882 ++
1.5883 ++ if (mp->next != NULL)
1.5884 ++ mp->next->prev = mp;
1.5885 ++ else
1.5886 ++ minipool_vector_tail = mp;
1.5887 ++ }
1.5888 ++
1.5889 ++ /* Save the new entry. */
1.5890 ++ min_mp = mp;
1.5891 ++
1.5892 ++ if (mp->prev)
1.5893 ++ mp = mp->prev;
1.5894 ++ else
1.5895 ++ mp->offset = 0;
1.5896 ++
1.5897 ++ /* Scan over the following entries and adjust their offsets. */
1.5898 ++ while (mp->next != NULL)
1.5899 ++ {
1.5900 ++ if (mp->next->min_address < mp->min_address + mp->fix_size)
1.5901 ++ mp->next->min_address = mp->min_address + mp->fix_size;
1.5902 ++
1.5903 ++ if (mp->refcount)
1.5904 ++ mp->next->offset = mp->offset + mp->fix_size;
1.5905 ++ else
1.5906 ++ mp->next->offset = mp->offset;
1.5907 ++
1.5908 ++ mp = mp->next;
1.5909 ++ }
1.5910 ++
1.5911 ++ return min_mp;
1.5912 ++}
1.5913 ++
1.5914 ++static void
1.5915 ++assign_minipool_offsets (Mfix * barrier)
1.5916 ++{
1.5917 ++ HOST_WIDE_INT offset = 0;
1.5918 ++ Mnode *mp;
1.5919 ++
1.5920 ++ minipool_barrier = barrier;
1.5921 ++
1.5922 ++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
1.5923 ++ {
1.5924 ++ mp->offset = offset;
1.5925 ++
1.5926 ++ if (mp->refcount > 0)
1.5927 ++ offset += mp->fix_size;
1.5928 ++ }
1.5929 ++}
1.5930 ++
1.5931 ++/* Print a symbolic form of X to the debug file, F. */
1.5932 ++static void
1.5933 ++avr32_print_value (FILE * f, rtx x)
1.5934 ++{
1.5935 ++ switch (GET_CODE (x))
1.5936 ++ {
1.5937 ++ case CONST_INT:
1.5938 ++ fprintf (f, "0x%x", (int) INTVAL (x));
1.5939 ++ return;
1.5940 ++
1.5941 ++ case CONST_DOUBLE:
1.5942 ++ fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
1.5943 ++ return;
1.5944 ++
1.5945 ++ case CONST_VECTOR:
1.5946 ++ {
1.5947 ++ int i;
1.5948 ++
1.5949 ++ fprintf (f, "<");
1.5950 ++ for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
1.5951 ++ {
1.5952 ++ fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
1.5953 ++ if (i < (CONST_VECTOR_NUNITS (x) - 1))
1.5954 ++ fputc (',', f);
1.5955 ++ }
1.5956 ++ fprintf (f, ">");
1.5957 ++ }
1.5958 ++ return;
1.5959 ++
1.5960 ++ case CONST_STRING:
1.5961 ++ fprintf (f, "\"%s\"", XSTR (x, 0));
1.5962 ++ return;
1.5963 ++
1.5964 ++ case SYMBOL_REF:
1.5965 ++ fprintf (f, "`%s'", XSTR (x, 0));
1.5966 ++ return;
1.5967 ++
1.5968 ++ case LABEL_REF:
1.5969 ++ fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
1.5970 ++ return;
1.5971 ++
1.5972 ++ case CONST:
1.5973 ++ avr32_print_value (f, XEXP (x, 0));
1.5974 ++ return;
1.5975 ++
1.5976 ++ case PLUS:
1.5977 ++ avr32_print_value (f, XEXP (x, 0));
1.5978 ++ fprintf (f, "+");
1.5979 ++ avr32_print_value (f, XEXP (x, 1));
1.5980 ++ return;
1.5981 ++
1.5982 ++ case PC:
1.5983 ++ fprintf (f, "pc");
1.5984 ++ return;
1.5985 ++
1.5986 ++ default:
1.5987 ++ fprintf (f, "????");
1.5988 ++ return;
1.5989 ++ }
1.5990 ++}
1.5991 ++
1.5992 ++int
1.5993 ++is_minipool_label (rtx label)
1.5994 ++{
1.5995 ++ minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
1.5996 ++
1.5997 ++ if (GET_CODE (label) != CODE_LABEL)
1.5998 ++ return FALSE;
1.5999 ++
1.6000 ++ while (cur_mp_label)
1.6001 ++ {
1.6002 ++ if (CODE_LABEL_NUMBER (label)
1.6003 ++ == CODE_LABEL_NUMBER (cur_mp_label->label))
1.6004 ++ return TRUE;
1.6005 ++ cur_mp_label = cur_mp_label->next;
1.6006 ++ }
1.6007 ++ return FALSE;
1.6008 ++}
1.6009 ++
1.6010 ++static void
1.6011 ++new_minipool_label (rtx label)
1.6012 ++{
1.6013 ++ if (!cfun->machine->minipool_label_head)
1.6014 ++ {
1.6015 ++ cfun->machine->minipool_label_head =
1.6016 ++ ggc_alloc (sizeof (minipool_labels));
1.6017 ++ cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
1.6018 ++ cfun->machine->minipool_label_head->label = label;
1.6019 ++ cfun->machine->minipool_label_head->next = 0;
1.6020 ++ cfun->machine->minipool_label_head->prev = 0;
1.6021 ++ }
1.6022 ++ else
1.6023 ++ {
1.6024 ++ cfun->machine->minipool_label_tail->next =
1.6025 ++ ggc_alloc (sizeof (minipool_labels));
1.6026 ++ cfun->machine->minipool_label_tail->next->label = label;
1.6027 ++ cfun->machine->minipool_label_tail->next->next = 0;
1.6028 ++ cfun->machine->minipool_label_tail->next->prev =
1.6029 ++ cfun->machine->minipool_label_tail;
1.6030 ++ cfun->machine->minipool_label_tail =
1.6031 ++ cfun->machine->minipool_label_tail->next;
1.6032 ++ }
1.6033 ++}
1.6034 ++
1.6035 ++/* Output the literal table */
1.6036 ++static void
1.6037 ++dump_minipool (rtx scan)
1.6038 ++{
1.6039 ++ Mnode *mp;
1.6040 ++ Mnode *nmp;
1.6041 ++
1.6042 ++ if (dump_file)
1.6043 ++ fprintf (dump_file,
1.6044 ++ ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
1.6045 ++ INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
1.6046 ++
1.6047 ++ scan = emit_insn_after (gen_consttable_start (), scan);
1.6048 ++ scan = emit_insn_after (gen_align_4 (), scan);
1.6049 ++ scan = emit_label_after (minipool_vector_label, scan);
1.6050 ++ new_minipool_label (minipool_vector_label);
1.6051 ++
1.6052 ++ for (mp = minipool_vector_head; mp != NULL; mp = nmp)
1.6053 ++ {
1.6054 ++ if (mp->refcount > 0)
1.6055 ++ {
1.6056 ++ if (dump_file)
1.6057 ++ {
1.6058 ++ fprintf (dump_file,
1.6059 ++ ";; Offset %u, min %ld, max %ld ",
1.6060 ++ (unsigned) mp->offset, (unsigned long) mp->min_address,
1.6061 ++ (unsigned long) mp->max_address);
1.6062 ++ avr32_print_value (dump_file, mp->value);
1.6063 ++ fputc ('\n', dump_file);
1.6064 ++ }
1.6065 ++
1.6066 ++ switch (mp->fix_size)
1.6067 ++ {
1.6068 ++#ifdef HAVE_consttable_4
1.6069 ++ case 4:
1.6070 ++ scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
1.6071 ++ break;
1.6072 ++
1.6073 ++#endif
1.6074 ++#ifdef HAVE_consttable_8
1.6075 ++ case 8:
1.6076 ++ scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
1.6077 ++ break;
1.6078 ++
1.6079 ++#endif
1.6080 ++#ifdef HAVE_consttable_16
1.6081 ++ case 16:
1.6082 ++ scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
1.6083 ++ break;
1.6084 ++
1.6085 ++#endif
1.6086 ++ case 0:
1.6087 ++ /* This can happen for force-minipool entries which just are
1.6088 ++ there to force the minipool to be generated. */
1.6089 ++ break;
1.6090 ++ default:
1.6091 ++ abort ();
1.6092 ++ break;
1.6093 ++ }
1.6094 ++ }
1.6095 ++
1.6096 ++ nmp = mp->next;
1.6097 ++ free (mp);
1.6098 ++ }
1.6099 ++
1.6100 ++ minipool_vector_head = minipool_vector_tail = NULL;
1.6101 ++ scan = emit_insn_after (gen_consttable_end (), scan);
1.6102 ++ scan = emit_barrier_after (scan);
1.6103 ++}
1.6104 ++
1.6105 ++/* Return the cost of forcibly inserting a barrier after INSN. */
1.6106 ++static int
1.6107 ++avr32_barrier_cost (rtx insn)
1.6108 ++{
1.6109 ++ /* Basing the location of the pool on the loop depth is preferable, but at
1.6110 ++ the moment, the basic block information seems to be corrupt by this
1.6111 ++ stage of the compilation. */
1.6112 ++ int base_cost = 50;
1.6113 ++ rtx next = next_nonnote_insn (insn);
1.6114 ++
1.6115 ++ if (next != NULL && GET_CODE (next) == CODE_LABEL)
1.6116 ++ base_cost -= 20;
1.6117 ++
1.6118 ++ switch (GET_CODE (insn))
1.6119 ++ {
1.6120 ++ case CODE_LABEL:
1.6121 ++ /* It will always be better to place the table before the label, rather
1.6122 ++ than after it. */
1.6123 ++ return 50;
1.6124 ++
1.6125 ++ case INSN:
1.6126 ++ case CALL_INSN:
1.6127 ++ return base_cost;
1.6128 ++
1.6129 ++ case JUMP_INSN:
1.6130 ++ return base_cost - 10;
1.6131 ++
1.6132 ++ default:
1.6133 ++ return base_cost + 10;
1.6134 ++ }
1.6135 ++}
1.6136 ++
1.6137 ++/* Find the best place in the insn stream in the range
1.6138 ++ (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
1.6139 ++ Create the barrier by inserting a jump and add a new fix entry for
1.6140 ++ it. */
1.6141 ++static Mfix *
1.6142 ++create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
1.6143 ++{
1.6144 ++ HOST_WIDE_INT count = 0;
1.6145 ++ rtx barrier;
1.6146 ++ rtx from = fix->insn;
1.6147 ++ rtx selected = from;
1.6148 ++ int selected_cost;
1.6149 ++ HOST_WIDE_INT selected_address;
1.6150 ++ Mfix *new_fix;
1.6151 ++ HOST_WIDE_INT max_count = max_address - fix->address;
1.6152 ++ rtx label = gen_label_rtx ();
1.6153 ++
1.6154 ++ selected_cost = avr32_barrier_cost (from);
1.6155 ++ selected_address = fix->address;
1.6156 ++
1.6157 ++ while (from && count < max_count)
1.6158 ++ {
1.6159 ++ rtx tmp;
1.6160 ++ int new_cost;
1.6161 ++
1.6162 ++ /* This code shouldn't have been called if there was a natural barrier
1.6163 ++ within range. */
1.6164 ++ if (GET_CODE (from) == BARRIER)
1.6165 ++ abort ();
1.6166 ++
1.6167 ++ /* Count the length of this insn. */
1.6168 ++ count += get_attr_length (from);
1.6169 ++
1.6170 ++ /* If there is a jump table, add its length. */
1.6171 ++ tmp = is_jump_table (from);
1.6172 ++ if (tmp != NULL)
1.6173 ++ {
1.6174 ++ count += get_jump_table_size (tmp);
1.6175 ++
1.6176 ++ /* Jump tables aren't in a basic block, so base the cost on the
1.6177 ++ dispatch insn. If we select this location, we will still put
1.6178 ++ the pool after the table. */
1.6179 ++ new_cost = avr32_barrier_cost (from);
1.6180 ++
1.6181 ++ if (count < max_count && new_cost <= selected_cost)
1.6182 ++ {
1.6183 ++ selected = tmp;
1.6184 ++ selected_cost = new_cost;
1.6185 ++ selected_address = fix->address + count;
1.6186 ++ }
1.6187 ++
1.6188 ++ /* Continue after the dispatch table. */
1.6189 ++ from = NEXT_INSN (tmp);
1.6190 ++ continue;
1.6191 ++ }
1.6192 ++
1.6193 ++ new_cost = avr32_barrier_cost (from);
1.6194 ++
1.6195 ++ if (count < max_count && new_cost <= selected_cost)
1.6196 ++ {
1.6197 ++ selected = from;
1.6198 ++ selected_cost = new_cost;
1.6199 ++ selected_address = fix->address + count;
1.6200 ++ }
1.6201 ++
1.6202 ++ from = NEXT_INSN (from);
1.6203 ++ }
1.6204 ++
1.6205 ++ /* Create a new JUMP_INSN that branches around a barrier. */
1.6206 ++ from = emit_jump_insn_after (gen_jump (label), selected);
1.6207 ++ JUMP_LABEL (from) = label;
1.6208 ++ barrier = emit_barrier_after (from);
1.6209 ++ emit_label_after (label, barrier);
1.6210 ++
1.6211 ++ /* Create a minipool barrier entry for the new barrier. */
1.6212 ++ new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
1.6213 ++ new_fix->insn = barrier;
1.6214 ++ new_fix->address = selected_address;
1.6215 ++ new_fix->next = fix->next;
1.6216 ++ fix->next = new_fix;
1.6217 ++
1.6218 ++ return new_fix;
1.6219 ++}
1.6220 ++
1.6221 ++/* Record that there is a natural barrier in the insn stream at
1.6222 ++ ADDRESS. */
1.6223 ++static void
1.6224 ++push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
1.6225 ++{
1.6226 ++ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
1.6227 ++
1.6228 ++ fix->insn = insn;
1.6229 ++ fix->address = address;
1.6230 ++
1.6231 ++ fix->next = NULL;
1.6232 ++ if (minipool_fix_head != NULL)
1.6233 ++ minipool_fix_tail->next = fix;
1.6234 ++ else
1.6235 ++ minipool_fix_head = fix;
1.6236 ++
1.6237 ++ minipool_fix_tail = fix;
1.6238 ++}
1.6239 ++
1.6240 ++/* Record INSN, which will need fixing up to load a value from the
1.6241 ++ minipool. ADDRESS is the offset of the insn since the start of the
1.6242 ++ function; LOC is a pointer to the part of the insn which requires
1.6243 ++ fixing; VALUE is the constant that must be loaded, which is of type
1.6244 ++ MODE. */
1.6245 ++static void
1.6246 ++push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
1.6247 ++ enum machine_mode mode, rtx value)
1.6248 ++{
1.6249 ++ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
1.6250 ++ rtx body = PATTERN (insn);
1.6251 ++
1.6252 ++ fix->insn = insn;
1.6253 ++ fix->address = address;
1.6254 ++ fix->loc = loc;
1.6255 ++ fix->mode = mode;
1.6256 ++ fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
1.6257 ++ fix->value = value;
1.6258 ++
1.6259 ++ if (GET_CODE (body) == PARALLEL)
1.6260 ++ {
1.6261 ++ /* Mcall : Ks16 << 2 */
1.6262 ++ fix->forwards = ((1 << 15) - 1) << 2;
1.6263 ++ fix->backwards = (1 << 15) << 2;
1.6264 ++ }
1.6265 ++ else if (GET_CODE (body) == SET
1.6266 ++ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
1.6267 ++ {
1.6268 ++ /* Word Load */
1.6269 ++ if (TARGET_HARD_FLOAT
1.6270 ++ && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
1.6271 ++ {
1.6272 ++ /* Ldc0.w : Ku12 << 2 */
1.6273 ++ fix->forwards = ((1 << 12) - 1) << 2;
1.6274 ++ fix->backwards = 0;
1.6275 ++ }
1.6276 ++ else
1.6277 ++ {
1.6278 ++ if (optimize_size)
1.6279 ++ {
1.6280 ++ /* Lddpc : Ku7 << 2 */
1.6281 ++ fix->forwards = ((1 << 7) - 1) << 2;
1.6282 ++ fix->backwards = 0;
1.6283 ++ }
1.6284 ++ else
1.6285 ++ {
1.6286 ++ /* Ld.w : Ks16 */
1.6287 ++ fix->forwards = ((1 << 15) - 4);
1.6288 ++ fix->backwards = (1 << 15);
1.6289 ++ }
1.6290 ++ }
1.6291 ++ }
1.6292 ++ else if (GET_CODE (body) == SET
1.6293 ++ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
1.6294 ++ {
1.6295 ++ /* Double word load */
1.6296 ++ if (TARGET_HARD_FLOAT
1.6297 ++ && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
1.6298 ++ {
1.6299 ++ /* Ldc0.d : Ku12 << 2 */
1.6300 ++ fix->forwards = ((1 << 12) - 1) << 2;
1.6301 ++ fix->backwards = 0;
1.6302 ++ }
1.6303 ++ else
1.6304 ++ {
1.6305 ++ /* Ld.d : Ks16 */
1.6306 ++ fix->forwards = ((1 << 15) - 4);
1.6307 ++ fix->backwards = (1 << 15);
1.6308 ++ }
1.6309 ++ }
1.6310 ++ else if (GET_CODE (body) == UNSPEC_VOLATILE
1.6311 ++ && XINT (body, 1) == VUNSPEC_MVRC)
1.6312 ++ {
1.6313 ++ /* Coprocessor load */
1.6314 ++ /* Ldc : Ku8 << 2 */
1.6315 ++ fix->forwards = ((1 << 8) - 1) << 2;
1.6316 ++ fix->backwards = 0;
1.6317 ++ }
1.6318 ++ else
1.6319 ++ {
1.6320 ++ /* Assume worst case which is lddpc insn. */
1.6321 ++ fix->forwards = ((1 << 7) - 1) << 2;
1.6322 ++ fix->backwards = 0;
1.6323 ++ }
1.6324 ++
1.6325 ++ fix->minipool = NULL;
1.6326 ++
1.6327 ++ /* If an insn doesn't have a range defined for it, then it isn't expecting
1.6328 ++ to be reworked by this code. Better to abort now than to generate duff
1.6329 ++ assembly code. */
1.6330 ++ if (fix->forwards == 0 && fix->backwards == 0)
1.6331 ++ abort ();
1.6332 ++
1.6333 ++ if (dump_file)
1.6334 ++ {
1.6335 ++ fprintf (dump_file,
1.6336 ++ ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
1.6337 ++ GET_MODE_NAME (mode),
1.6338 ++ INSN_UID (insn), (unsigned long) address,
1.6339 ++ -1 * (long) fix->backwards, (long) fix->forwards);
1.6340 ++ avr32_print_value (dump_file, fix->value);
1.6341 ++ fprintf (dump_file, "\n");
1.6342 ++ }
1.6343 ++
1.6344 ++ /* Add it to the chain of fixes. */
1.6345 ++ fix->next = NULL;
1.6346 ++
1.6347 ++ if (minipool_fix_head != NULL)
1.6348 ++ minipool_fix_tail->next = fix;
1.6349 ++ else
1.6350 ++ minipool_fix_head = fix;
1.6351 ++
1.6352 ++ minipool_fix_tail = fix;
1.6353 ++}
1.6354 ++
1.6355 ++/* Scan INSN and note any of its operands that need fixing.
1.6356 ++ If DO_PUSHES is false we do not actually push any of the fixups
1.6357 ++ needed. The function returns TRUE if any fixups were needed/pushed.
1.6358 ++ This is used by avr32_memory_load_p() which needs to know about loads
1.6359 ++ of constants that will be converted into minipool loads. */
1.6360 ++static bool
1.6361 ++note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
1.6362 ++{
1.6363 ++ bool result = false;
1.6364 ++ int opno;
1.6365 ++
1.6366 ++ extract_insn (insn);
1.6367 ++
1.6368 ++ if (!constrain_operands (1))
1.6369 ++ fatal_insn_not_found (insn);
1.6370 ++
1.6371 ++ if (recog_data.n_alternatives == 0)
1.6372 ++ return false;
1.6373 ++
1.6374 ++ /* Fill in recog_op_alt with information about the constraints of this
1.6375 ++ insn. */
1.6376 ++ preprocess_constraints ();
1.6377 ++
1.6378 ++ for (opno = 0; opno < recog_data.n_operands; opno++)
1.6379 ++ {
1.6380 ++ rtx op;
1.6381 ++
1.6382 ++ /* Things we need to fix can only occur in inputs. */
1.6383 ++ if (recog_data.operand_type[opno] != OP_IN)
1.6384 ++ continue;
1.6385 ++
1.6386 ++ op = recog_data.operand[opno];
1.6387 ++
1.6388 ++ if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
1.6389 ++ {
1.6390 ++ if (do_pushes)
1.6391 ++ {
1.6392 ++ rtx cop = avoid_constant_pool_reference (op);
1.6393 ++
1.6394 ++ /* Casting the address of something to a mode narrower than a
1.6395 ++ word can cause avoid_constant_pool_reference() to return the
1.6396 ++ pool reference itself. That's no good to us here. Let's
1.6397 ++ just hope that we can use the constant pool value directly.
1.6398 ++ */
1.6399 ++ if (op == cop)
1.6400 ++ cop = get_pool_constant (XEXP (op, 0));
1.6401 ++
1.6402 ++ push_minipool_fix (insn, address,
1.6403 ++ recog_data.operand_loc[opno],
1.6404 ++ recog_data.operand_mode[opno], cop);
1.6405 ++ }
1.6406 ++
1.6407 ++ result = true;
1.6408 ++ }
1.6409 ++ else if (TARGET_HAS_ASM_ADDR_PSEUDOS
1.6410 ++ && avr32_address_operand (op, GET_MODE (op)))
1.6411 ++ {
1.6412 ++ /* Handle pseudo instructions using a direct address. These pseudo
1.6413 ++ instructions might need entries in the constant pool and we must
1.6414 ++ therefore create a constant pool for them, in case the
1.6415 ++ assembler/linker needs to insert entries. */
1.6416 ++ if (do_pushes)
1.6417 ++ {
1.6418 ++ /* Push a dummy constant pool entry so that the .cpool
1.6419 ++ directive should be inserted on the appropriate place in the
1.6420 ++ code even if there are no real constant pool entries. This
1.6421 ++ is used by the assembler and linker to know where to put
1.6422 ++ generated constant pool entries. */
1.6423 ++ push_minipool_fix (insn, address,
1.6424 ++ recog_data.operand_loc[opno],
1.6425 ++ recog_data.operand_mode[opno],
1.6426 ++ gen_rtx_UNSPEC (VOIDmode,
1.6427 ++ gen_rtvec (1, const0_rtx),
1.6428 ++ UNSPEC_FORCE_MINIPOOL));
1.6429 ++ result = true;
1.6430 ++ }
1.6431 ++ }
1.6432 ++ }
1.6433 ++ return result;
1.6434 ++}
1.6435 ++
1.6436 ++
1.6437 ++static int
1.6438 ++avr32_insn_is_cast (rtx insn)
1.6439 ++{
1.6440 ++
1.6441 ++ if (NONJUMP_INSN_P (insn)
1.6442 ++ && GET_CODE (PATTERN (insn)) == SET
1.6443 ++ && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
1.6444 ++ || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
1.6445 ++ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
1.6446 ++ && REG_P (SET_DEST (PATTERN (insn))))
1.6447 ++ return true;
1.6448 ++ return false;
1.6449 ++}
1.6450 ++
1.6451 ++/*
1.6452 ++ Replace all occurrences of reg FROM with reg TO in X */
1.6453 ++
1.6454 ++rtx
1.6455 ++avr32_replace_reg (rtx x, rtx from, rtx to)
1.6456 ++{
1.6457 ++ int i, j;
1.6458 ++ const char *fmt;
1.6459 ++
1.6460 ++ gcc_assert ( REG_P (from) && REG_P (to) );
1.6461 ++
1.6462 ++ /* Allow this function to make replacements in EXPR_LISTs. */
1.6463 ++ if (x == 0)
1.6464 ++ return 0;
1.6465 ++
1.6466 ++ if (rtx_equal_p (x, from))
1.6467 ++ return to;
1.6468 ++
1.6469 ++ if (GET_CODE (x) == SUBREG)
1.6470 ++ {
1.6471 ++ rtx new = avr32_replace_reg (SUBREG_REG (x), from, to);
1.6472 ++
1.6473 ++ if (GET_CODE (new) == CONST_INT)
1.6474 ++ {
1.6475 ++ x = simplify_subreg (GET_MODE (x), new,
1.6476 ++ GET_MODE (SUBREG_REG (x)),
1.6477 ++ SUBREG_BYTE (x));
1.6478 ++ gcc_assert (x);
1.6479 ++ }
1.6480 ++ else
1.6481 ++ SUBREG_REG (x) = new;
1.6482 ++
1.6483 ++ return x;
1.6484 ++ }
1.6485 ++ else if (GET_CODE (x) == ZERO_EXTEND)
1.6486 ++ {
1.6487 ++ rtx new = avr32_replace_reg (XEXP (x, 0), from, to);
1.6488 ++
1.6489 ++ if (GET_CODE (new) == CONST_INT)
1.6490 ++ {
1.6491 ++ x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
1.6492 ++ new, GET_MODE (XEXP (x, 0)));
1.6493 ++ gcc_assert (x);
1.6494 ++ }
1.6495 ++ else
1.6496 ++ XEXP (x, 0) = new;
1.6497 ++
1.6498 ++ return x;
1.6499 ++ }
1.6500 ++
1.6501 ++ fmt = GET_RTX_FORMAT (GET_CODE (x));
1.6502 ++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1.6503 ++ {
1.6504 ++ if (fmt[i] == 'e')
1.6505 ++ XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to);
1.6506 ++ else if (fmt[i] == 'E')
1.6507 ++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1.6508 ++ XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to);
1.6509 ++ }
1.6510 ++
1.6511 ++ return x;
1.6512 ++}
1.6513 ++
1.6514 ++
1.6515 ++/* FIXME: The level of nesting in this function is way too deep. It needs to be
1.6516 ++ torn apart. */
1.6517 ++static void
1.6518 ++avr32_reorg_optimization (void)
1.6519 ++{
1.6520 ++ rtx first = get_first_nonnote_insn ();
1.6521 ++ rtx insn;
1.6522 ++
1.6523 ++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
1.6524 ++ {
1.6525 ++
1.6526 ++ /* Scan through all insns looking for cast operations. */
1.6527 ++ if (dump_file)
1.6528 ++ {
1.6529 ++ fprintf (dump_file, ";; Deleting redundant cast operations:\n");
1.6530 ++ }
1.6531 ++ for (insn = first; insn; insn = NEXT_INSN (insn))
1.6532 ++ {
1.6533 ++ rtx reg, src_reg, scan;
1.6534 ++ enum machine_mode mode;
1.6535 ++ int unused_cast;
1.6536 ++ rtx label_ref;
1.6537 ++
1.6538 ++ if (avr32_insn_is_cast (insn)
1.6539 ++ && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
1.6540 ++ || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
1.6541 ++ {
1.6542 ++ mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
1.6543 ++ reg = SET_DEST (PATTERN (insn));
1.6544 ++ src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
1.6545 ++ }
1.6546 ++ else
1.6547 ++ {
1.6548 ++ continue;
1.6549 ++ }
1.6550 ++
1.6551 ++ unused_cast = false;
1.6552 ++ label_ref = NULL_RTX;
1.6553 ++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
1.6554 ++ {
1.6555 ++ /* Check if we have reached the destination of a simple
1.6556 ++ conditional jump which we have already scanned past. If so,
1.6557 ++ we can safely continue scanning. */
1.6558 ++ if (LABEL_P (scan) && label_ref != NULL_RTX)
1.6559 ++ {
1.6560 ++ if (CODE_LABEL_NUMBER (scan) ==
1.6561 ++ CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
1.6562 ++ label_ref = NULL_RTX;
1.6563 ++ else
1.6564 ++ break;
1.6565 ++ }
1.6566 ++
1.6567 ++ if (!INSN_P (scan))
1.6568 ++ continue;
1.6569 ++
1.6570 ++ /* For conditional jumps we can manage to keep on scanning if
1.6571 ++ we meet the destination label later on before any new jump
1.6572 ++ insns occur. */
1.6573 ++ if (GET_CODE (scan) == JUMP_INSN)
1.6574 ++ {
1.6575 ++ if (any_condjump_p (scan) && label_ref == NULL_RTX)
1.6576 ++ label_ref = condjump_label (scan);
1.6577 ++ else
1.6578 ++ break;
1.6579 ++ }
1.6580 ++
1.6581 ++ /* Check if we have a call and the register is used as an argument. */
1.6582 ++ if (CALL_P (scan)
1.6583 ++ && find_reg_fusage (scan, USE, reg) )
1.6584 ++ break;
1.6585 ++
1.6586 ++ if (!reg_mentioned_p (reg, PATTERN (scan)))
1.6587 ++ continue;
1.6588 ++
1.6589 ++ /* Check if casted register is used in this insn */
1.6590 ++ if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
1.6591 ++ && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
1.6592 ++ GET_MODE (reg)))
1.6593 ++ {
1.6594 ++ /* If not used in the source to the set or in a memory
1.6595 ++ expression in the destination then the register is used
1.6596 ++ as a destination and is really dead. */
1.6597 ++ if (single_set (scan)
1.6598 ++ && GET_CODE (PATTERN (scan)) == SET
1.6599 ++ && REG_P (SET_DEST (PATTERN (scan)))
1.6600 ++ && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
1.6601 ++ && label_ref == NULL_RTX)
1.6602 ++ {
1.6603 ++ unused_cast = true;
1.6604 ++ }
1.6605 ++ break;
1.6606 ++ }
1.6607 ++
1.6608 ++ /* Check if register is dead or set in this insn */
1.6609 ++ if (dead_or_set_p (scan, reg))
1.6610 ++ {
1.6611 ++ unused_cast = true;
1.6612 ++ break;
1.6613 ++ }
1.6614 ++ }
1.6615 ++
1.6616 ++ /* Check if we have unresolved conditional jumps */
1.6617 ++ if (label_ref != NULL_RTX)
1.6618 ++ continue;
1.6619 ++
1.6620 ++ if (unused_cast)
1.6621 ++ {
1.6622 ++ if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
1.6623 ++ {
1.6624 ++ /* One operand cast, safe to delete */
1.6625 ++ if (dump_file)
1.6626 ++ {
1.6627 ++ fprintf (dump_file,
1.6628 ++ ";; INSN %i removed, casted register %i value not used.\n",
1.6629 ++ INSN_UID (insn), REGNO (reg));
1.6630 ++ }
1.6631 ++ SET_INSN_DELETED (insn);
1.6632 ++ /* Force the instruction to be recognized again */
1.6633 ++ INSN_CODE (insn) = -1;
1.6634 ++ }
1.6635 ++ else
1.6636 ++ {
1.6637 ++ /* Two operand cast, which really could be substituted with
1.6638 ++ a move, if the source register is dead after the cast
1.6639 ++ insn and then the insn which sets the source register
1.6640 ++ could instead directly set the destination register for
1.6641 ++ the cast. As long as there are no insns in between which
1.6642 ++ uses the register. */
1.6643 ++ rtx link = NULL_RTX;
1.6644 ++ rtx set;
1.6645 ++ rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
1.6646 ++ unused_cast = false;
1.6647 ++
1.6648 ++ if (!find_reg_note (insn, REG_DEAD, src_reg))
1.6649 ++ continue;
1.6650 ++
1.6651 ++ /* Search for the insn which sets the source register */
1.6652 ++ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1.6653 ++ {
1.6654 ++ if (REG_NOTE_KIND (link) != 0)
1.6655 ++ continue;
1.6656 ++ set = single_set (XEXP (link, 0));
1.6657 ++ if (set && rtx_equal_p (src_reg, SET_DEST (set)))
1.6658 ++ {
1.6659 ++ link = XEXP (link, 0);
1.6660 ++ break;
1.6661 ++ }
1.6662 ++ }
1.6663 ++
1.6664 ++ /* Found no link or link is a call insn where we can not
1.6665 ++ change the destination register */
1.6666 ++ if (link == NULL_RTX || CALL_P (link))
1.6667 ++ continue;
1.6668 ++
1.6669 ++ /* Scan through all insn between link and insn */
1.6670 ++ for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
1.6671 ++ {
1.6672 ++ /* Don't try to trace forward past a CODE_LABEL if we
1.6673 ++ haven't seen INSN yet. Ordinarily, we will only
1.6674 ++ find the setting insn in LOG_LINKS if it is in the
1.6675 ++ same basic block. However, cross-jumping can insert
1.6676 ++ code labels in between the load and the call, and
1.6677 ++ can result in situations where a single call insn
1.6678 ++ may have two targets depending on where we came
1.6679 ++ from. */
1.6680 ++
1.6681 ++ if (GET_CODE (scan) == CODE_LABEL)
1.6682 ++ break;
1.6683 ++
1.6684 ++ if (!INSN_P (scan))
1.6685 ++ continue;
1.6686 ++
1.6687 ++ /* Don't try to trace forward past a JUMP. To optimize
1.6688 ++ safely, we would have to check that all the
1.6689 ++ instructions at the jump destination did not use REG.
1.6690 ++ */
1.6691 ++
1.6692 ++ if (GET_CODE (scan) == JUMP_INSN)
1.6693 ++ {
1.6694 ++ break;
1.6695 ++ }
1.6696 ++
1.6697 ++ if (!reg_mentioned_p (src_reg, PATTERN (scan)))
1.6698 ++ continue;
1.6699 ++
1.6700 ++ /* We have reached the cast insn */
1.6701 ++ if (scan == insn)
1.6702 ++ {
1.6703 ++ /* We can remove cast and replace the destination
1.6704 ++ register of the link insn with the destination
1.6705 ++ of the cast */
1.6706 ++ if (dump_file)
1.6707 ++ {
1.6708 ++ fprintf (dump_file,
1.6709 ++ ";; INSN %i removed, casted value unused. "
1.6710 ++ "Destination of removed cast operation: register %i, folded into INSN %i.\n",
1.6711 ++ INSN_UID (insn), REGNO (reg),
1.6712 ++ INSN_UID (link));
1.6713 ++ }
1.6714 ++ /* Update link insn */
1.6715 ++ SET_DEST (PATTERN (link)) =
1.6716 ++ gen_rtx_REG (mode, REGNO (reg));
1.6717 ++ /* Force the instruction to be recognized again */
1.6718 ++ INSN_CODE (link) = -1;
1.6719 ++
1.6720 ++ /* Delete insn */
1.6721 ++ SET_INSN_DELETED (insn);
1.6722 ++ /* Force the instruction to be recognized again */
1.6723 ++ INSN_CODE (insn) = -1;
1.6724 ++ break;
1.6725 ++ }
1.6726 ++ }
1.6727 ++ }
1.6728 ++ }
1.6729 ++ }
1.6730 ++ }
1.6731 ++
1.6732 ++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
1.6733 ++ {
1.6734 ++
1.6735 ++ /* Scan through all insns looking for shifted add operations */
1.6736 ++ if (dump_file)
1.6737 ++ {
1.6738 ++ fprintf (dump_file,
1.6739 ++ ";; Deleting redundant shifted add operations:\n");
1.6740 ++ }
1.6741 ++ for (insn = first; insn; insn = NEXT_INSN (insn))
1.6742 ++ {
1.6743 ++ rtx reg, mem_expr, scan, op0, op1;
1.6744 ++ int add_only_used_as_pointer;
1.6745 ++
1.6746 ++ if (INSN_P (insn)
1.6747 ++ && GET_CODE (PATTERN (insn)) == SET
1.6748 ++ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
1.6749 ++ && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
1.6750 ++ || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
1.6751 ++ && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
1.6752 ++ CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
1.6753 ++ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
1.6754 ++ && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
1.6755 ++ {
1.6756 ++ reg = SET_DEST (PATTERN (insn));
1.6757 ++ mem_expr = SET_SRC (PATTERN (insn));
1.6758 ++ op0 = XEXP (XEXP (mem_expr, 0), 0);
1.6759 ++ op1 = XEXP (mem_expr, 1);
1.6760 ++ }
1.6761 ++ else
1.6762 ++ {
1.6763 ++ continue;
1.6764 ++ }
1.6765 ++
1.6766 ++	  /* Scan forward to check if the result of the shifted add
1.6767 ++ operation is only used as an address in memory operations and
1.6768 ++ that the operands to the shifted add are not clobbered. */
1.6769 ++ add_only_used_as_pointer = false;
1.6770 ++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
1.6771 ++ {
1.6772 ++ if (!INSN_P (scan))
1.6773 ++ continue;
1.6774 ++
1.6775 ++ /* Don't try to trace forward past a JUMP or CALL. To optimize
1.6776 ++ safely, we would have to check that all the instructions at
1.6777 ++ the jump destination did not use REG. */
1.6778 ++
1.6779 ++ if (GET_CODE (scan) == JUMP_INSN)
1.6780 ++ {
1.6781 ++ break;
1.6782 ++ }
1.6783 ++
1.6784 ++ /* If used in a call insn then we cannot optimize it away */
1.6785 ++ if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
1.6786 ++ break;
1.6787 ++
1.6788 ++ /* If any of the operands of the shifted add are clobbered we
1.6789 ++		 cannot optimize the shifted add away */
1.6790 ++ if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
1.6791 ++ || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
1.6792 ++ break;
1.6793 ++
1.6794 ++ if (!reg_mentioned_p (reg, PATTERN (scan)))
1.6795 ++ continue;
1.6796 ++
1.6797 ++ /* If used any other place than as a pointer or as the
1.6798 ++ destination register we failed */
1.6799 ++ if (!(single_set (scan)
1.6800 ++ && GET_CODE (PATTERN (scan)) == SET
1.6801 ++ && ((MEM_P (SET_DEST (PATTERN (scan)))
1.6802 ++ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
1.6803 ++ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
1.6804 ++ || (MEM_P (SET_SRC (PATTERN (scan)))
1.6805 ++ && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
1.6806 ++ && REGNO (XEXP
1.6807 ++ (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
1.6808 ++ && !(GET_CODE (PATTERN (scan)) == SET
1.6809 ++ && REG_P (SET_DEST (PATTERN (scan)))
1.6810 ++ && !regno_use_in (REGNO (reg),
1.6811 ++ SET_SRC (PATTERN (scan)))))
1.6812 ++ break;
1.6813 ++
1.6814 ++ /* We cannot replace the pointer in TImode insns
1.6815 ++		 as these have a different addressing mode than the other
1.6816 ++ memory insns. */
1.6817 ++ if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode )
1.6818 ++ break;
1.6819 ++
1.6820 ++ /* Check if register is dead or set in this insn */
1.6821 ++ if (dead_or_set_p (scan, reg))
1.6822 ++ {
1.6823 ++ add_only_used_as_pointer = true;
1.6824 ++ break;
1.6825 ++ }
1.6826 ++ }
1.6827 ++
1.6828 ++ if (add_only_used_as_pointer)
1.6829 ++ {
1.6830 ++ /* Lets delete the add insn and replace all memory references
1.6831 ++ which uses the pointer with the full expression. */
1.6832 ++ if (dump_file)
1.6833 ++ {
1.6834 ++ fprintf (dump_file,
1.6835 ++ ";; Deleting INSN %i since address expression can be folded into all "
1.6836 ++ "memory references using this expression\n",
1.6837 ++ INSN_UID (insn));
1.6838 ++ }
1.6839 ++ SET_INSN_DELETED (insn);
1.6840 ++ /* Force the instruction to be recognized again */
1.6841 ++ INSN_CODE (insn) = -1;
1.6842 ++
1.6843 ++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
1.6844 ++ {
1.6845 ++ if (!INSN_P (scan))
1.6846 ++ continue;
1.6847 ++
1.6848 ++ if (!reg_mentioned_p (reg, PATTERN (scan)))
1.6849 ++ continue;
1.6850 ++
1.6851 ++ /* If used any other place than as a pointer or as the
1.6852 ++ destination register we failed */
1.6853 ++ if ((single_set (scan)
1.6854 ++ && GET_CODE (PATTERN (scan)) == SET
1.6855 ++ && ((MEM_P (SET_DEST (PATTERN (scan)))
1.6856 ++ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
1.6857 ++ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
1.6858 ++ REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
1.6859 ++ &&
1.6860 ++ REG_P (XEXP
1.6861 ++ (SET_SRC (PATTERN (scan)),
1.6862 ++ 0))
1.6863 ++ &&
1.6864 ++ REGNO (XEXP
1.6865 ++ (SET_SRC (PATTERN (scan)),
1.6866 ++ 0)) == REGNO (reg)))))
1.6867 ++ {
1.6868 ++ if (dump_file)
1.6869 ++ {
1.6870 ++ fprintf (dump_file,
1.6871 ++ ";; Register %i replaced by indexed address in INSN %i\n",
1.6872 ++ REGNO (reg), INSN_UID (scan));
1.6873 ++ }
1.6874 ++ if (MEM_P (SET_DEST (PATTERN (scan))))
1.6875 ++ XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
1.6876 ++ else
1.6877 ++ XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
1.6878 ++ }
1.6879 ++
1.6880 ++ /* Check if register is dead or set in this insn */
1.6881 ++ if (dead_or_set_p (scan, reg))
1.6882 ++ {
1.6883 ++ break;
1.6884 ++ }
1.6885 ++
1.6886 ++ }
1.6887 ++ }
1.6888 ++ }
1.6889 ++ }
1.6890 ++
1.6891 ++
1.6892 ++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
1.6893 ++ {
1.6894 ++
1.6895 ++ /* Scan through all insns looking for conditional register to
1.6896 ++ register move operations */
1.6897 ++ if (dump_file)
1.6898 ++ {
1.6899 ++ fprintf (dump_file,
1.6900 ++ ";; Folding redundant conditional move operations:\n");
1.6901 ++ }
1.6902 ++ for (insn = first; insn; insn = next_nonnote_insn (insn))
1.6903 ++ {
1.6904 ++ rtx src_reg, dst_reg, scan, test;
1.6905 ++
1.6906 ++ if (INSN_P (insn)
1.6907 ++ && GET_CODE (PATTERN (insn)) == COND_EXEC
1.6908 ++ && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET
1.6909 ++ && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn))))
1.6910 ++ && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn))))
1.6911 ++ && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn)))))
1.6912 ++ {
1.6913 ++ src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn)));
1.6914 ++ dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn)));
1.6915 ++ test = COND_EXEC_TEST (PATTERN (insn));
1.6916 ++ }
1.6917 ++ else
1.6918 ++ {
1.6919 ++ continue;
1.6920 ++ }
1.6921 ++
1.6922 ++ /* Scan backward through the rest of insns in this if-then or if-else
1.6923 ++ block and check if we can fold the move into another of the conditional
1.6924 ++ insns in the same block. */
1.6925 ++ scan = prev_nonnote_insn (insn);
1.6926 ++ while (INSN_P (scan)
1.6927 ++ && GET_CODE (PATTERN (scan)) == COND_EXEC
1.6928 ++ && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test))
1.6929 ++ {
1.6930 ++ rtx pattern = COND_EXEC_CODE (PATTERN (scan));
1.6931 ++ if ( GET_CODE (pattern) == PARALLEL )
1.6932 ++ pattern = XVECEXP (pattern, 0, 0);
1.6933 ++
1.6934 ++ if ( reg_set_p (src_reg, pattern) )
1.6935 ++ {
1.6936 ++ /* Fold in the destination register for the cond. move
1.6937 ++ into this insn. */
1.6938 ++ SET_DEST (pattern) = dst_reg;
1.6939 ++ if (dump_file)
1.6940 ++ {
1.6941 ++ fprintf (dump_file,
1.6942 ++ ";; Deleting INSN %i since this operation can be folded into INSN %i\n",
1.6943 ++ INSN_UID (insn), INSN_UID (scan));
1.6944 ++ }
1.6945 ++
1.6946 ++ /* Scan and check if any of the insns in between uses the src_reg. We
1.6947 ++ must then replace it with the dst_reg. */
1.6948 ++ while ( (scan = next_nonnote_insn (scan)) != insn ){
1.6949 ++ avr32_replace_reg (scan, src_reg, dst_reg);
1.6950 ++ }
1.6951 ++ /* Delete the insn. */
1.6952 ++ SET_INSN_DELETED (insn);
1.6953 ++
1.6954 ++ /* Force the instruction to be recognized again */
1.6955 ++ INSN_CODE (insn) = -1;
1.6956 ++ break;
1.6957 ++ }
1.6958 ++
1.6959 ++ /* If the destination register is used but not set in this insn
1.6960 ++ we cannot fold. */
1.6961 ++ if ( reg_mentioned_p (dst_reg, pattern) )
1.6962 ++ break;
1.6963 ++
1.6964 ++ scan = prev_nonnote_insn (scan);
1.6965 ++ }
1.6966 ++ }
1.6967 ++ }
1.6968 ++
1.6969 ++}
1.6970 ++
1.6971 ++/* Exported to toplev.c.
1.6972 ++
1.6973 ++ Do a final pass over the function, just before delayed branch
1.6974 ++ scheduling. */
1.6975 ++
1.6976 ++static void
1.6977 ++avr32_reorg (void)
1.6978 ++{
1.6979 ++ rtx insn;
1.6980 ++ HOST_WIDE_INT address = 0;
1.6981 ++ Mfix *fix;
1.6982 ++
1.6983 ++ minipool_fix_head = minipool_fix_tail = NULL;
1.6984 ++
1.6985 ++ /* The first insn must always be a note, or the code below won't scan it
1.6986 ++ properly. */
1.6987 ++ insn = get_insns ();
1.6988 ++ if (GET_CODE (insn) != NOTE)
1.6989 ++ abort ();
1.6990 ++
1.6991 ++ /* Scan all the insns and record the operands that will need fixing. */
1.6992 ++ for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
1.6993 ++ {
1.6994 ++ if (GET_CODE (insn) == BARRIER)
1.6995 ++ push_minipool_barrier (insn, address);
1.6996 ++ else if (INSN_P (insn))
1.6997 ++ {
1.6998 ++ rtx table;
1.6999 ++
1.7000 ++ note_invalid_constants (insn, address, true);
1.7001 ++ address += get_attr_length (insn);
1.7002 ++
1.7003 ++ /* If the insn is a vector jump, add the size of the table and skip
1.7004 ++ the table. */
1.7005 ++ if ((table = is_jump_table (insn)) != NULL)
1.7006 ++ {
1.7007 ++ address += get_jump_table_size (table);
1.7008 ++ insn = table;
1.7009 ++ }
1.7010 ++ }
1.7011 ++ }
1.7012 ++
1.7013 ++ fix = minipool_fix_head;
1.7014 ++
1.7015 ++ /* Now scan the fixups and perform the required changes. */
1.7016 ++ while (fix)
1.7017 ++ {
1.7018 ++ Mfix *ftmp;
1.7019 ++ Mfix *fdel;
1.7020 ++ Mfix *last_added_fix;
1.7021 ++ Mfix *last_barrier = NULL;
1.7022 ++ Mfix *this_fix;
1.7023 ++
1.7024 ++ /* Skip any further barriers before the next fix. */
1.7025 ++ while (fix && GET_CODE (fix->insn) == BARRIER)
1.7026 ++ fix = fix->next;
1.7027 ++
1.7028 ++ /* No more fixes. */
1.7029 ++ if (fix == NULL)
1.7030 ++ break;
1.7031 ++
1.7032 ++ last_added_fix = NULL;
1.7033 ++
1.7034 ++ for (ftmp = fix; ftmp; ftmp = ftmp->next)
1.7035 ++ {
1.7036 ++ if (GET_CODE (ftmp->insn) == BARRIER)
1.7037 ++ {
1.7038 ++ if (ftmp->address >= minipool_vector_head->max_address)
1.7039 ++ break;
1.7040 ++
1.7041 ++ last_barrier = ftmp;
1.7042 ++ }
1.7043 ++ else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
1.7044 ++ break;
1.7045 ++
1.7046 ++ last_added_fix = ftmp; /* Keep track of the last fix added.
1.7047 ++ */
1.7048 ++ }
1.7049 ++
1.7050 ++ /* If we found a barrier, drop back to that; any fixes that we could
1.7051 ++ have reached but come after the barrier will now go in the next
1.7052 ++ mini-pool. */
1.7053 ++ if (last_barrier != NULL)
1.7054 ++ {
1.7055 ++ /* Reduce the refcount for those fixes that won't go into this pool
1.7056 ++ after all. */
1.7057 ++ for (fdel = last_barrier->next;
1.7058 ++ fdel && fdel != ftmp; fdel = fdel->next)
1.7059 ++ {
1.7060 ++ fdel->minipool->refcount--;
1.7061 ++ fdel->minipool = NULL;
1.7062 ++ }
1.7063 ++
1.7064 ++ ftmp = last_barrier;
1.7065 ++ }
1.7066 ++ else
1.7067 ++ {
1.7068 ++ /* ftmp is first fix that we can't fit into this pool and there no
1.7069 ++ natural barriers that we could use. Insert a new barrier in the
1.7070 ++ code somewhere between the previous fix and this one, and
1.7071 ++ arrange to jump around it. */
1.7072 ++ HOST_WIDE_INT max_address;
1.7073 ++
1.7074 ++ /* The last item on the list of fixes must be a barrier, so we can
1.7075 ++ never run off the end of the list of fixes without last_barrier
1.7076 ++ being set. */
1.7077 ++ if (ftmp == NULL)
1.7078 ++ abort ();
1.7079 ++
1.7080 ++ max_address = minipool_vector_head->max_address;
1.7081 ++ /* Check that there isn't another fix that is in range that we
1.7082 ++ couldn't fit into this pool because the pool was already too
1.7083 ++ large: we need to put the pool before such an instruction. */
1.7084 ++ if (ftmp->address < max_address)
1.7085 ++ max_address = ftmp->address;
1.7086 ++
1.7087 ++ last_barrier = create_fix_barrier (last_added_fix, max_address);
1.7088 ++ }
1.7089 ++
1.7090 ++ assign_minipool_offsets (last_barrier);
1.7091 ++
1.7092 ++ while (ftmp)
1.7093 ++ {
1.7094 ++ if (GET_CODE (ftmp->insn) != BARRIER
1.7095 ++ && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
1.7096 ++ == NULL))
1.7097 ++ break;
1.7098 ++
1.7099 ++ ftmp = ftmp->next;
1.7100 ++ }
1.7101 ++
1.7102 ++ /* Scan over the fixes we have identified for this pool, fixing them up
1.7103 ++ and adding the constants to the pool itself. */
1.7104 ++ for (this_fix = fix; this_fix && ftmp != this_fix;
1.7105 ++ this_fix = this_fix->next)
1.7106 ++ if (GET_CODE (this_fix->insn) != BARRIER
1.7107 ++ /* Do nothing for entries present just to force the insertion of
1.7108 ++ a minipool. */
1.7109 ++ && !IS_FORCE_MINIPOOL (this_fix->value))
1.7110 ++ {
1.7111 ++ rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
1.7112 ++ minipool_vector_label),
1.7113 ++ this_fix->minipool->offset);
1.7114 ++ *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
1.7115 ++ }
1.7116 ++
1.7117 ++ dump_minipool (last_barrier->insn);
1.7118 ++ fix = ftmp;
1.7119 ++ }
1.7120 ++
1.7121 ++ /* Free the minipool memory. */
1.7122 ++ obstack_free (&minipool_obstack, minipool_startobj);
1.7123 ++
1.7124 ++ avr32_reorg_optimization ();
1.7125 ++}
1.7126 ++
1.7127 ++
1.7128 ++/*
1.7129 ++ Hook for doing some final scanning of instructions. Does nothing yet...*/
1.7130 ++void
1.7131 ++avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
1.7132 ++ rtx * opvec ATTRIBUTE_UNUSED,
1.7133 ++ int noperands ATTRIBUTE_UNUSED)
1.7134 ++{
1.7135 ++ return;
1.7136 ++}
1.7137 ++
1.7138 ++
1.7139 ++/* Function for changing the condition on the next instruction,
1.7140 ++   should be used when emitting compare instructions and
1.7141 ++ the condition of the next instruction needs to change.
1.7142 ++*/
1.7143 ++int
1.7144 ++set_next_insn_cond (rtx cur_insn, rtx new_cond)
1.7145 ++{
1.7146 ++ rtx next_insn = next_nonnote_insn (cur_insn);
1.7147 ++ if ((next_insn != NULL_RTX)
1.7148 ++ && (INSN_P (next_insn)))
1.7149 ++ {
1.7150 ++ if ((GET_CODE (PATTERN (next_insn)) == SET)
1.7151 ++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
1.7152 ++ {
1.7153 ++ /* Branch instructions */
1.7154 ++ XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
1.7155 ++ /* Force the instruction to be recognized again */
1.7156 ++ INSN_CODE (next_insn) = -1;
1.7157 ++ return TRUE;
1.7158 ++ }
1.7159 ++ else if ((GET_CODE (PATTERN (next_insn)) == SET)
1.7160 ++ && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
1.7161 ++ GET_MODE (SET_SRC (PATTERN (next_insn)))))
1.7162 ++ {
1.7163 ++ /* scc with no compare */
1.7164 ++ SET_SRC (PATTERN (next_insn)) = new_cond;
1.7165 ++ /* Force the instruction to be recognized again */
1.7166 ++ INSN_CODE (next_insn) = -1;
1.7167 ++ return TRUE;
1.7168 ++ }
1.7169 ++ else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
1.7170 ++ {
1.7171 ++ if ( GET_CODE (new_cond) == UNSPEC )
1.7172 ++ {
1.7173 ++ COND_EXEC_TEST (PATTERN (next_insn)) =
1.7174 ++ gen_rtx_UNSPEC (CCmode,
1.7175 ++ gen_rtvec (2,
1.7176 ++ XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0),
1.7177 ++ XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)),
1.7178 ++ XINT (new_cond, 1));
1.7179 ++ }
1.7180 ++ else
1.7181 ++ {
1.7182 ++ PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond));
1.7183 ++ }
1.7184 ++ }
1.7185 ++ }
1.7186 ++
1.7187 ++ return FALSE;
1.7188 ++}
1.7189 ++
1.7190 ++/* Function for obtaining the condition for the next instruction
1.7191 ++ after cur_insn.
1.7192 ++*/
1.7193 ++rtx
1.7194 ++get_next_insn_cond (rtx cur_insn)
1.7195 ++{
1.7196 ++ rtx next_insn = next_nonnote_insn (cur_insn);
1.7197 ++ rtx cond = NULL_RTX;
1.7198 ++ if (next_insn != NULL_RTX
1.7199 ++ && INSN_P (next_insn))
1.7200 ++ {
1.7201 ++ if ((GET_CODE (PATTERN (next_insn)) == SET)
1.7202 ++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
1.7203 ++ {
1.7204 ++ /* Branch and cond if then else instructions */
1.7205 ++ cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
1.7206 ++ }
1.7207 ++ else if ((GET_CODE (PATTERN (next_insn)) == SET)
1.7208 ++ && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
1.7209 ++ GET_MODE (SET_SRC (PATTERN (next_insn)))))
1.7210 ++ {
1.7211 ++ /* scc with no compare */
1.7212 ++ cond = SET_SRC (PATTERN (next_insn));
1.7213 ++ }
1.7214 ++ else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
1.7215 ++ {
1.7216 ++ cond = COND_EXEC_TEST (PATTERN (next_insn));
1.7217 ++ }
1.7218 ++ }
1.7219 ++ return cond;
1.7220 ++}
1.7221 ++
1.7222 ++
1.7223 ++rtx
1.7224 ++avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
1.7225 ++{
1.7226 ++
1.7227 ++ rtx new_cond = NULL_RTX;
1.7228 ++ rtx ops[2];
1.7229 ++ rtx compare_pattern;
1.7230 ++ ops[0] = op0;
1.7231 ++ ops[1] = op1;
1.7232 ++
1.7233 ++ if ( GET_CODE (op0) == AND )
1.7234 ++ compare_pattern = op0;
1.7235 ++ else
1.7236 ++ compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
1.7237 ++
1.7238 ++ new_cond = is_compare_redundant (compare_pattern, cond);
1.7239 ++
1.7240 ++ if (new_cond != NULL_RTX)
1.7241 ++ return new_cond;
1.7242 ++
1.7243 ++ /* Check if we are inserting a bit-load instead of a compare. */
1.7244 ++ if ( GET_CODE (op0) == AND )
1.7245 ++ {
1.7246 ++ ops[0] = XEXP (op0, 0);
1.7247 ++ ops[1] = XEXP (op0, 1);
1.7248 ++ output_asm_insn ("bld\t%0, %p1", ops);
1.7249 ++ return cond;
1.7250 ++ }
1.7251 ++
1.7252 ++ /* Insert compare */
1.7253 ++ switch (mode)
1.7254 ++ {
1.7255 ++ case QImode:
1.7256 ++ output_asm_insn ("cp.b\t%0, %1", ops);
1.7257 ++ break;
1.7258 ++ case HImode:
1.7259 ++ output_asm_insn ("cp.h\t%0, %1", ops);
1.7260 ++ break;
1.7261 ++ case SImode:
1.7262 ++ output_asm_insn ("cp.w\t%0, %1", ops);
1.7263 ++ break;
1.7264 ++ case DImode:
1.7265 ++ if (GET_CODE (op1) != REG)
1.7266 ++ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
1.7267 ++ else
1.7268 ++ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
1.7269 ++ break;
1.7270 ++ default:
1.7271 ++ internal_error ("Unknown comparison mode");
1.7272 ++ break;
1.7273 ++ }
1.7274 ++
1.7275 ++ return cond;
1.7276 ++}
1.7277 ++
1.7278 ++int
1.7279 ++avr32_load_multiple_operation (rtx op,
1.7280 ++ enum machine_mode mode ATTRIBUTE_UNUSED)
1.7281 ++{
1.7282 ++ int count = XVECLEN (op, 0);
1.7283 ++ unsigned int dest_regno;
1.7284 ++ rtx src_addr;
1.7285 ++ rtx elt;
1.7286 ++ int i = 1, base = 0;
1.7287 ++
1.7288 ++ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
1.7289 ++ return 0;
1.7290 ++
1.7291 ++ /* Check to see if this might be a write-back. */
1.7292 ++ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
1.7293 ++ {
1.7294 ++ i++;
1.7295 ++ base = 1;
1.7296 ++
1.7297 ++ /* Now check it more carefully. */
1.7298 ++ if (GET_CODE (SET_DEST (elt)) != REG
1.7299 ++ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
1.7300 ++ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
1.7301 ++ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
1.7302 ++ return 0;
1.7303 ++ }
1.7304 ++
1.7305 ++ /* Perform a quick check so we don't blow up below. */
1.7306 ++ if (count <= 1
1.7307 ++ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
1.7308 ++ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
1.7309 ++ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
1.7310 ++ return 0;
1.7311 ++
1.7312 ++ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
1.7313 ++ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
1.7314 ++
1.7315 ++ for (; i < count; i++)
1.7316 ++ {
1.7317 ++ elt = XVECEXP (op, 0, i);
1.7318 ++
1.7319 ++ if (GET_CODE (elt) != SET
1.7320 ++ || GET_CODE (SET_DEST (elt)) != REG
1.7321 ++ || GET_MODE (SET_DEST (elt)) != SImode
1.7322 ++ || GET_CODE (SET_SRC (elt)) != UNSPEC)
1.7323 ++ return 0;
1.7324 ++ }
1.7325 ++
1.7326 ++ return 1;
1.7327 ++}
1.7328 ++
1.7329 ++int
1.7330 ++avr32_store_multiple_operation (rtx op,
1.7331 ++ enum machine_mode mode ATTRIBUTE_UNUSED)
1.7332 ++{
1.7333 ++ int count = XVECLEN (op, 0);
1.7334 ++ int src_regno;
1.7335 ++ rtx dest_addr;
1.7336 ++ rtx elt;
1.7337 ++ int i = 1;
1.7338 ++
1.7339 ++ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
1.7340 ++ return 0;
1.7341 ++
1.7342 ++ /* Perform a quick check so we don't blow up below. */
1.7343 ++ if (count <= i
1.7344 ++ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
1.7345 ++ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
1.7346 ++ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
1.7347 ++ return 0;
1.7348 ++
1.7349 ++ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
1.7350 ++ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
1.7351 ++
1.7352 ++ for (; i < count; i++)
1.7353 ++ {
1.7354 ++ elt = XVECEXP (op, 0, i);
1.7355 ++
1.7356 ++ if (GET_CODE (elt) != SET
1.7357 ++ || GET_CODE (SET_DEST (elt)) != MEM
1.7358 ++ || GET_MODE (SET_DEST (elt)) != SImode
1.7359 ++ || GET_CODE (SET_SRC (elt)) != UNSPEC)
1.7360 ++ return 0;
1.7361 ++ }
1.7362 ++
1.7363 ++ return 1;
1.7364 ++}
1.7365 ++
1.7366 ++int
1.7367 ++avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
1.7368 ++{
1.7369 ++ /* Check if they use the same accumulator */
1.7370 ++ if (rtx_equal_p
1.7371 ++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
1.7372 ++ {
1.7373 ++ return TRUE;
1.7374 ++ }
1.7375 ++
1.7376 ++ return FALSE;
1.7377 ++}
1.7378 ++
1.7379 ++int
1.7380 ++avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
1.7381 ++{
1.7382 ++ /*
1.7383 ++ Check if the mul instruction produces the accumulator for the mac
1.7384 ++ instruction. */
1.7385 ++ if (rtx_equal_p
1.7386 ++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
1.7387 ++ {
1.7388 ++ return TRUE;
1.7389 ++ }
1.7390 ++ return FALSE;
1.7391 ++}
1.7392 ++
1.7393 ++int
1.7394 ++avr32_store_bypass (rtx insn_out, rtx insn_in)
1.7395 ++{
1.7396 ++ /* Only valid bypass if the output result is used as an src in the store
1.7397 ++ instruction, NOT if used as a pointer or base. */
1.7398 ++ if (rtx_equal_p
1.7399 ++ (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
1.7400 ++ {
1.7401 ++ return TRUE;
1.7402 ++ }
1.7403 ++
1.7404 ++ return FALSE;
1.7405 ++}
1.7406 ++
1.7407 ++int
1.7408 ++avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
1.7409 ++{
1.7410 ++ /* Check if the register holding the result from the mul instruction is
1.7411 ++ used as a result register in the input instruction. */
1.7412 ++ if (rtx_equal_p
1.7413 ++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
1.7414 ++ {
1.7415 ++ return TRUE;
1.7416 ++ }
1.7417 ++
1.7418 ++ return FALSE;
1.7419 ++}
1.7420 ++
1.7421 ++int
1.7422 ++avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
1.7423 ++{
1.7424 ++ /* Check if the first loaded word in insn_out is used in insn_in. */
1.7425 ++ rtx dst_reg;
1.7426 ++ rtx second_loaded_reg;
1.7427 ++
1.7428 ++ /* If this is a double alu operation then the bypass is not valid */
1.7429 ++ if ((get_attr_type (insn_in) == TYPE_ALU
1.7430 ++ || get_attr_type (insn_in) == TYPE_ALU2)
1.7431 ++ && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
1.7432 ++ return FALSE;
1.7433 ++
1.7434 ++ /* Get the destination register in the load */
1.7435 ++ if (!REG_P (SET_DEST (PATTERN (insn_out))))
1.7436 ++ return FALSE;
1.7437 ++
1.7438 ++ dst_reg = SET_DEST (PATTERN (insn_out));
1.7439 ++ second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
1.7440 ++
1.7441 ++ if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
1.7442 ++ return TRUE;
1.7443 ++
1.7444 ++ return FALSE;
1.7445 ++}
1.7446 ++
1.7447 ++
1.7448 ++int
1.7449 ++avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
1.7450 ++{
1.7451 ++ /*
1.7452 ++     Check if the first two loaded words in insn_out are used in insn_in. */
1.7453 ++ rtx dst_reg;
1.7454 ++ rtx third_loaded_reg, fourth_loaded_reg;
1.7455 ++
1.7456 ++ /* Get the destination register in the load */
1.7457 ++ if (!REG_P (SET_DEST (PATTERN (insn_out))))
1.7458 ++ return FALSE;
1.7459 ++
1.7460 ++ dst_reg = SET_DEST (PATTERN (insn_out));
1.7461 ++ third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
1.7462 ++ fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
1.7463 ++
1.7464 ++ if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
1.7465 ++ && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
1.7466 ++ {
1.7467 ++ return TRUE;
1.7468 ++ }
1.7469 ++
1.7470 ++ return FALSE;
1.7471 ++}
1.7472 ++
1.7473 ++
1.7474 ++
1.7475 ++rtx
1.7476 ++avr32_ifcvt_modify_test (ce_if_block_t *ce_info,
1.7477 ++ rtx test ){
1.7478 ++ rtx branch_insn;
1.7479 ++ rtx cmp_test;
1.7480 ++ rtx compare_op0;
1.7481 ++ rtx compare_op1;
1.7482 ++
1.7483 ++
1.7484 ++ if ( !ce_info
1.7485 ++ || test == NULL_RTX
1.7486 ++ || !reg_mentioned_p (cc0_rtx, test))
1.7487 ++ return test;
1.7488 ++
1.7489 ++ branch_insn = BB_END (ce_info->test_bb);
1.7490 ++ cmp_test = PATTERN(prev_nonnote_insn (branch_insn));
1.7491 ++
1.7492 ++ if (GET_CODE(cmp_test) != SET
1.7493 ++ || !CC0_P(XEXP(cmp_test, 0)) )
1.7494 ++ return cmp_test;
1.7495 ++
1.7496 ++ if ( GET_CODE(SET_SRC(cmp_test)) == COMPARE ){
1.7497 ++ compare_op0 = XEXP(SET_SRC(cmp_test), 0);
1.7498 ++ compare_op1 = XEXP(SET_SRC(cmp_test), 1);
1.7499 ++ } else {
1.7500 ++ compare_op0 = SET_SRC(cmp_test);
1.7501 ++ compare_op1 = const0_rtx;
1.7502 ++ }
1.7503 ++
1.7504 ++ return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0),
1.7505 ++ compare_op0, compare_op1);
1.7506 ++}
1.7507 ++
1.7508 ++
1.7509 ++
1.7510 ++rtx
1.7511 ++avr32_ifcvt_modify_insn (ce_if_block_t *ce_info,
1.7512 ++ rtx pattern,
1.7513 ++ rtx insn,
1.7514 ++ int *num_true_changes){
1.7515 ++ rtx test = COND_EXEC_TEST(pattern);
1.7516 ++ rtx op = COND_EXEC_CODE(pattern);
1.7517 ++ rtx cmp_insn;
1.7518 ++ rtx cond_exec_insn;
1.7519 ++ int inputs_set_outside_ifblock = 1;
1.7520 ++ basic_block current_bb = BLOCK_FOR_INSN (insn);
1.7521 ++ rtx bb_insn ;
1.7522 ++ enum machine_mode mode = GET_MODE (XEXP (op, 0));
1.7523 ++
1.7524 ++ if (CC0_P(XEXP(test, 0)))
1.7525 ++ test = avr32_ifcvt_modify_test (ce_info,
1.7526 ++ test );
1.7527 ++
1.7528 ++ pattern = gen_rtx_COND_EXEC (VOIDmode, test, op);
1.7529 ++
1.7530 ++ if ( !reload_completed )
1.7531 ++ {
1.7532 ++ rtx start;
1.7533 ++ int num_insns;
1.7534 ++ int max_insns = MAX_CONDITIONAL_EXECUTE;
1.7535 ++
1.7536 ++ if ( !ce_info )
1.7537 ++ return op;
1.7538 ++
1.7539 ++ /* Check if the insn is not suitable for conditional
1.7540 ++ execution. */
1.7541 ++ start_sequence ();
1.7542 ++ cond_exec_insn = emit_insn (pattern);
1.7543 ++ if ( recog_memoized (cond_exec_insn) < 0
1.7544 ++ && !no_new_pseudos )
1.7545 ++ {
1.7546 ++ /* Insn is not suitable for conditional execution, try
1.7547 ++ to fix it up by using an extra scratch register or
1.7548 ++ by pulling the operation outside the if-then-else
1.7549 ++	     and then emitting a conditional move inside the if-then-else. */
1.7550 ++ end_sequence ();
1.7551 ++ if ( GET_CODE (op) != SET
1.7552 ++ || !REG_P (SET_DEST (op))
1.7553 ++ || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE
1.7554 ++ || GET_MODE_SIZE (mode) > UNITS_PER_WORD )
1.7555 ++ return NULL_RTX;
1.7556 ++
1.7557 ++ /* Check if any of the input operands to the insn is set inside the
1.7558 ++ current block. */
1.7559 ++ if ( current_bb->index == ce_info->then_bb->index )
1.7560 ++ start = PREV_INSN (BB_HEAD (ce_info->then_bb));
1.7561 ++ else
1.7562 ++ start = PREV_INSN (BB_HEAD (ce_info->else_bb));
1.7563 ++
1.7564 ++
1.7565 ++ for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) )
1.7566 ++ {
1.7567 ++ rtx set = single_set (bb_insn);
1.7568 ++
1.7569 ++ if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op)))
1.7570 ++ {
1.7571 ++ inputs_set_outside_ifblock = 0;
1.7572 ++ break;
1.7573 ++ }
1.7574 ++ }
1.7575 ++
1.7576 ++ cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb));
1.7577 ++
1.7578 ++
1.7579 ++ /* Check if we can insert more insns. */
1.7580 ++ num_insns = ( ce_info->num_then_insns +
1.7581 ++ ce_info->num_else_insns +
1.7582 ++ ce_info->num_cond_clobber_insns +
1.7583 ++ ce_info->num_extra_move_insns );
1.7584 ++
1.7585 ++ if ( ce_info->num_else_insns != 0 )
1.7586 ++ max_insns *=2;
1.7587 ++
1.7588 ++ if ( num_insns >= max_insns )
1.7589 ++ return NULL_RTX;
1.7590 ++
1.7591 ++ /* Check if we have an instruction which might be converted to
1.7592 ++ conditional form if we give it a scratch register to clobber. */
1.7593 ++ {
1.7594 ++ rtx clobber_insn;
1.7595 ++ rtx scratch_reg = gen_reg_rtx (mode);
1.7596 ++ rtx new_pattern = copy_rtx (pattern);
1.7597 ++ rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern));
1.7598 ++
1.7599 ++ rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg);
1.7600 ++ rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber };
1.7601 ++ COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec));
1.7602 ++
1.7603 ++ start_sequence ();
1.7604 ++ clobber_insn = emit_insn (new_pattern);
1.7605 ++
1.7606 ++ if ( recog_memoized (clobber_insn) >= 0
1.7607 ++ && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2
1.7608 ++ && CONST_INT_P (XEXP (set_src, 1))
1.7609 ++ && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") )
1.7610 ++ || !ce_info->else_bb
1.7611 ++ || current_bb->index == ce_info->else_bb->index ))
1.7612 ++ {
1.7613 ++ end_sequence ();
1.7614 ++ /* Force the insn to be recognized again. */
1.7615 ++ INSN_CODE (insn) = -1;
1.7616 ++
1.7617 ++ /* If this is the first change in this IF-block then
1.7618 ++ signal that we have made a change. */
1.7619 ++ if ( ce_info->num_cond_clobber_insns == 0
1.7620 ++ && ce_info->num_extra_move_insns == 0 )
1.7621 ++ *num_true_changes += 1;
1.7622 ++
1.7623 ++ ce_info->num_cond_clobber_insns++;
1.7624 ++
1.7625 ++ if (dump_file)
1.7626 ++ fprintf (dump_file,
1.7627 ++ "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n",
1.7628 ++ INSN_UID (insn));
1.7629 ++
1.7630 ++ return COND_EXEC_CODE (new_pattern);
1.7631 ++ }
1.7632 ++ end_sequence ();
1.7633 ++ }
1.7634 ++
1.7635 ++ if ( inputs_set_outside_ifblock )
1.7636 ++ {
1.7637 ++ /* Check if the insn before the cmp is an and which used
1.7638 ++ together with the cmp can be optimized into a bld. If
1.7639 ++ so then we should try to put the insn before the and
1.7640 ++ so that we can catch the bld peephole. */
1.7641 ++ rtx set;
1.7642 ++ rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn);
1.7643 ++ if (insn_before_cmp_insn
1.7644 ++ && (set = single_set (insn_before_cmp_insn))
1.7645 ++ && GET_CODE (SET_SRC (set)) == AND
1.7646 ++ && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode)
1.7647 ++ /* Also make sure that the insn does not set any
1.7648 ++ of the input operands to the insn we are pulling out. */
1.7649 ++ && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) )
1.7650 ++ cmp_insn = prev_nonnote_insn (cmp_insn);
1.7651 ++
1.7652 ++ /* We can try to put the operation outside the if-then-else
1.7653 ++ blocks and insert a move. */
1.7654 ++ if ( !insn_invalid_p (insn)
1.7655 ++ /* Do not allow conditional insns to be moved outside the
1.7656 ++ if-then-else. */
1.7657 ++ && !reg_mentioned_p (cc0_rtx, insn)
1.7658 ++ /* We cannot move memory loads outside of the if-then-else
1.7659 ++ since the memory access should not be performed if the
1.7660 ++ condition is not met. */
1.7661 ++ && !mem_mentioned_p (SET_SRC (op)) )
1.7662 ++ {
1.7663 ++ rtx scratch_reg = gen_reg_rtx (mode);
1.7664 ++ rtx op_pattern = copy_rtx (op);
1.7665 ++ rtx new_insn, seq;
1.7666 ++ rtx link, prev_link;
1.7667 ++ op = copy_rtx (op);
1.7668 ++ /* Emit the operation to a temp reg before the compare,
1.7669 ++ and emit a move inside the if-then-else, hoping that the
1.7670 ++ whole if-then-else can be converted to conditional
1.7671 ++ execution. */
1.7672 ++ SET_DEST (op_pattern) = scratch_reg;
1.7673 ++ start_sequence ();
1.7674 ++ new_insn = emit_insn (op_pattern);
1.7675 ++ seq = get_insns();
1.7676 ++ end_sequence ();
1.7677 ++
1.7678 ++ /* Check again that the insn is valid. For some insns the insn might
1.7679 ++ become invalid if the destination register is changed. Ie. for mulacc
1.7680 ++ operations. */
1.7681 ++ if ( insn_invalid_p (new_insn) )
1.7682 ++ return NULL_RTX;
1.7683 ++
1.7684 ++ emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn));
1.7685 ++
1.7686 ++ if (dump_file)
1.7687 ++ fprintf (dump_file,
1.7688 ++ "\nMoving INSN %d out of IF-block by adding INSN %d...\n",
1.7689 ++ INSN_UID (insn), INSN_UID (new_insn));
1.7690 ++
1.7691 ++ ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn;
1.7692 ++ ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn;
1.7693 ++ XEXP (op, 1) = scratch_reg;
1.7694 ++ /* Force the insn to be recognized again. */
1.7695 ++ INSN_CODE (insn) = -1;
1.7696 ++
1.7697 ++ /* Move REG_DEAD notes to the moved insn. */
1.7698 ++ prev_link = NULL_RTX;
1.7699 ++ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1.7700 ++ {
1.7701 ++ if (REG_NOTE_KIND (link) == REG_DEAD)
1.7702 ++ {
1.7703 ++ /* Add the REG_DEAD note to the new insn. */
1.7704 ++ rtx dead_reg = XEXP (link, 0);
1.7705 ++ REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn));
1.7706 ++ /* Remove the REG_DEAD note from the insn we convert to a move. */
1.7707 ++ if ( prev_link )
1.7708 ++ XEXP (prev_link, 1) = XEXP (link, 1);
1.7709 ++ else
1.7710 ++ REG_NOTES (insn) = XEXP (link, 1);
1.7711 ++ }
1.7712 ++ else
1.7713 ++ {
1.7714 ++ prev_link = link;
1.7715 ++ }
1.7716 ++ }
1.7717 ++ /* Add a REG_DEAD note to signal that the scratch register is dead. */
1.7718 ++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn));
1.7719 ++
1.7720 ++ /* If this is the first change in this IF-block then
1.7721 ++ signal that we have made a change. */
1.7722 ++ if ( ce_info->num_cond_clobber_insns == 0
1.7723 ++ && ce_info->num_extra_move_insns == 0 )
1.7724 ++ *num_true_changes += 1;
1.7725 ++
1.7726 ++ ce_info->num_extra_move_insns++;
1.7727 ++ return op;
1.7728 ++ }
1.7729 ++ }
1.7730 ++
1.7731 ++ /* We failed to fixup the insns, so this if-then-else can not be made
1.7732 ++ conditional. Just return NULL_RTX so that the if-then-else conversion
1.7733 ++ for this if-then-else will be cancelled. */
1.7734 ++ return NULL_RTX;
1.7735 ++ }
1.7736 ++ end_sequence ();
1.7737 ++ return op;
1.7738 ++ }
1.7739 ++
1.7740 ++ /* Signal that we have started if conversion after reload, which means
1.7741 ++ that it should be safe to split all the predicable clobber insns which
1.7742 ++ did not become cond_exec back into a simpler form if possible. */
1.7743 ++ cfun->machine->ifcvt_after_reload = 1;
1.7744 ++
1.7745 ++ return pattern;
1.7746 ++}
1.7747 ++
1.7748 ++
1.7749 ++void
1.7750 ++avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info,
1.7751 ++ int *num_true_changes)
1.7752 ++{
1.7753 ++ int n;
1.7754 ++
1.7755 ++ if ( ce_info->num_extra_move_insns > 0
1.7756 ++ && ce_info->num_cond_clobber_insns == 0)
1.7757 ++ /* Signal that we did not do any changes after all. */
1.7758 ++ *num_true_changes -= 1;
1.7759 ++
1.7760 ++ /* Remove any inserted move insns. */
1.7761 ++ for ( n = 0; n < ce_info->num_extra_move_insns; n++ )
1.7762 ++ {
1.7763 ++ rtx link, prev_link;
1.7764 ++
1.7765 ++ /* Remove REG_DEAD note since we are not needing the scratch register anyway. */
1.7766 ++ prev_link = NULL_RTX;
1.7767 ++ for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1))
1.7768 ++ {
1.7769 ++ if (REG_NOTE_KIND (link) == REG_DEAD)
1.7770 ++ {
1.7771 ++ if ( prev_link )
1.7772 ++ XEXP (prev_link, 1) = XEXP (link, 1);
1.7773 ++ else
1.7774 ++ REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1);
1.7775 ++ }
1.7776 ++ else
1.7777 ++ {
1.7778 ++ prev_link = link;
1.7779 ++ }
1.7780 ++ }
1.7781 ++
1.7782 ++ /* Revert all reg_notes for the moved insn. */
1.7783 ++ for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1))
1.7784 ++ {
1.7785 ++ REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
1.7786 ++ XEXP (link, 0),
1.7787 ++ REG_NOTES (ce_info->extra_move_insns[n]));
1.7788 ++ }
1.7789 ++
1.7790 ++ /* Remove the moved insn. */
1.7791 ++ remove_insn ( ce_info->moved_insns[n] );
1.7792 ++ }
1.7793 ++}
1.7794 ++
1.7795 ++/* Function returning TRUE if INSN with OPERANDS is a splittable
1.7796 ++ conditional immediate clobber insn. We assume that the insn is
1.7797 ++ already a conditional immediate clobber insn and do not check
1.7798 ++ for that. */
1.7799 ++int
1.7800 ++avr32_cond_imm_clobber_splittable (rtx insn,
1.7801 ++ rtx operands[])
1.7802 ++{
1.7803 ++ if ( (REGNO (operands[0]) != REGNO (operands[1]))
1.7804 ++ && (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode)
1.7805 ++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
1.7806 ++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16"))
1.7807 ++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
1.7808 ++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) )
1.7809 ++ return FALSE;
1.7810 ++
1.7811 ++ return TRUE;
1.7812 ++}
1.7813 ++
1.7814 ++/* Function for getting an integer value from a const_int or const_double
1.7815 ++ expression regardless of the HOST_WIDE_INT size. Each target cpu word
1.7816 ++ will be put into the val array where the LSW will be stored at the lowest
1.7817 ++ address and so forth. Assumes that const_expr is either a const_int or
1.7818 ++ const_double. Only valid for modes which have sizes that are a multiple
1.7819 ++ of the word size.
1.7820 ++*/
1.7821 ++void
1.7822 ++avr32_get_intval (enum machine_mode mode,
1.7823 ++ rtx const_expr,
1.7824 ++ HOST_WIDE_INT *val)
1.7825 ++{
1.7826 ++ int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
1.7827 ++ const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
1.7828 ++
1.7829 ++ if ( GET_CODE(const_expr) == CONST_DOUBLE ){
1.7830 ++ HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr);
1.7831 ++ HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr);
1.7832 ++ /* Evaluate hi and lo values of const_double. */
1.7833 ++ avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
1.7834 ++ GEN_INT (lo),
1.7835 ++ &val[0]);
1.7836 ++ avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
1.7837 ++ GEN_INT (hi),
1.7838 ++ &val[words_in_const_int]);
1.7839 ++ } else if ( GET_CODE(const_expr) == CONST_INT ){
1.7840 ++ HOST_WIDE_INT value = INTVAL(const_expr);
1.7841 ++ int word;
1.7842 ++ for ( word = 0; (word < words_in_mode) && (word < words_in_const_int); word++ ){
1.7843 ++ /* Shift word up to the MSW and shift down again to extract the
1.7844 ++ word and sign-extend. */
1.7845 ++ int lshift = (words_in_const_int - word - 1) * BITS_PER_WORD;
1.7846 ++ int rshift = (words_in_const_int-1) * BITS_PER_WORD;
1.7847 ++ val[word] = (value << lshift) >> rshift;
1.7848 ++ }
1.7849 ++
1.7850 ++ for ( ; word < words_in_mode; word++ ){
1.7851 ++ /* Just put the sign bits in the remaining words. */
1.7852 ++ val[word] = value < 0 ? -1 : 0;
1.7853 ++ }
1.7854 ++ }
1.7855 ++}
1.7856 ++
1.7857 ++void
1.7858 ++avr32_split_const_expr (enum machine_mode mode,
1.7859 ++ enum machine_mode new_mode,
1.7860 ++ rtx expr,
1.7861 ++ rtx *split_expr)
1.7862 ++{
1.7863 ++ int i, word;
1.7864 ++ int words_in_intval = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
1.7865 ++ int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD;
1.7866 ++ const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
1.7867 ++ HOST_WIDE_INT *val = alloca (words_in_intval * UNITS_PER_WORD);
1.7868 ++
1.7869 ++ avr32_get_intval (mode, expr, val);
1.7870 ++
1.7871 ++ for ( i=0; i < (words_in_intval/words_in_split_values); i++ )
1.7872 ++ {
1.7873 ++ HOST_WIDE_INT value_lo = 0, value_hi = 0;
1.7874 ++ for ( word = 0; word < words_in_split_values; word++ )
1.7875 ++ {
1.7876 ++ if ( word >= words_in_const_int )
1.7877 ++ value_hi |= ((val[i * words_in_split_values + word] &
1.7878 ++ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
1.7879 ++ << (BITS_PER_WORD * (word - words_in_const_int)));
1.7880 ++ else
1.7881 ++ value_lo |= ((val[i * words_in_split_values + word] &
1.7882 ++ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
1.7883 ++ << (BITS_PER_WORD * word));
1.7884 ++ }
1.7885 ++ split_expr[i] = immed_double_const(value_lo, value_hi, new_mode);
1.7886 ++ }
1.7887 ++}
1.7888 ++
1.7889 ++
1.7890 ++/* Set up library functions to comply to AVR32 ABI */
1.7891 ++
1.7892 ++static void
1.7893 ++avr32_init_libfuncs (void)
1.7894 ++{
1.7895 ++ /* Convert gcc run-time function names to AVR32 ABI names */
1.7896 ++
1.7897 ++ /* Double-precision floating-point arithmetic. */
1.7898 ++ set_optab_libfunc (neg_optab, DFmode, NULL);
1.7899 ++
1.7900 ++ /* Double-precision comparisons. */
1.7901 ++ set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
1.7902 ++ set_optab_libfunc (ne_optab, DFmode, NULL);
1.7903 ++ set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
1.7904 ++ set_optab_libfunc (le_optab, DFmode, NULL);
1.7905 ++ set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
1.7906 ++ set_optab_libfunc (gt_optab, DFmode, NULL);
1.7907 ++
1.7908 ++ /* Single-precision floating-point arithmetic. */
1.7909 ++ set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
1.7910 ++ set_optab_libfunc (neg_optab, SFmode, NULL);
1.7911 ++
1.7912 ++ /* Single-precision comparisons. */
1.7913 ++ set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
1.7914 ++ set_optab_libfunc (ne_optab, SFmode, NULL);
1.7915 ++ set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
1.7916 ++ set_optab_libfunc (le_optab, SFmode, NULL);
1.7917 ++ set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
1.7918 ++ set_optab_libfunc (gt_optab, SFmode, NULL);
1.7919 ++
1.7920 ++ /* Floating-point to integer conversions. */
1.7921 ++ set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
1.7922 ++ set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
1.7923 ++ set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
1.7924 ++ set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
1.7925 ++ set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
1.7926 ++ set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
1.7927 ++ set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
1.7928 ++ set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
1.7929 ++
1.7930 ++ /* Conversions between floating types. */
1.7931 ++ set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
1.7932 ++ set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
1.7933 ++
1.7934 ++ /* Integer to floating-point conversions. Table 8. */
1.7935 ++ set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
1.7936 ++ set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
1.7937 ++ set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
1.7938 ++ set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
1.7939 ++ set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
1.7940 ++ set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
1.7941 ++ /* TODO: Add these to gcc library functions */
1.7942 ++ //set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
1.7943 ++ //set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
1.7944 ++
1.7945 ++ /* Long long. Table 9. */
1.7946 ++ set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
1.7947 ++ set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
1.7948 ++ set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
1.7949 ++ set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
1.7950 ++ set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
1.7951 ++ set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
1.7952 ++ set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
1.7953 ++ set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
1.7954 ++
1.7955 ++ /* Floating point library functions which have fast versions. */
1.7956 ++ if ( TARGET_FAST_FLOAT )
1.7957 ++ {
1.7958 ++ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast");
1.7959 ++ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast");
1.7960 ++ set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast");
1.7961 ++ set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast");
1.7962 ++ set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast");
1.7963 ++ set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub_fast");
1.7964 ++ set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div_fast");
1.7965 ++ }
1.7966 ++ else
1.7967 ++ {
1.7968 ++ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
1.7969 ++ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
1.7970 ++ set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
1.7971 ++ set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
1.7972 ++ set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
1.7973 ++ set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
1.7974 ++ set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
1.7975 ++ }
1.7976 ++}
1.7977 +--- /dev/null
1.7978 ++++ b/gcc/config/avr32/avr32-elf.h
1.7979 +@@ -0,0 +1,84 @@
1.7980 ++/*
1.7981 ++ Elf specific definitions.
1.7982 ++ Copyright 2003-2006 Atmel Corporation.
1.7983 ++
1.7984 ++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.7985 ++
1.7986 ++ This file is part of GCC.
1.7987 ++
1.7988 ++ This program is free software; you can redistribute it and/or modify
1.7989 ++ it under the terms of the GNU General Public License as published by
1.7990 ++ the Free Software Foundation; either version 2 of the License, or
1.7991 ++ (at your option) any later version.
1.7992 ++
1.7993 ++ This program is distributed in the hope that it will be useful,
1.7994 ++ but WITHOUT ANY WARRANTY; without even the implied warranty of
1.7995 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.7996 ++ GNU General Public License for more details.
1.7997 ++
1.7998 ++ You should have received a copy of the GNU General Public License
1.7999 ++ along with this program; if not, write to the Free Software
1.8000 ++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
1.8001 ++
1.8002 ++
1.8003 ++/*****************************************************************************
1.8004 ++ * Controlling the Compilation Driver, 'gcc'
1.8005 ++ *****************************************************************************/
1.8006 ++
1.8007 ++/* Run-time Target Specification. */
1.8008 ++#undef TARGET_VERSION
1.8009 ++#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr);
1.8010 ++
1.8011 ++/*
1.8012 ++Another C string constant used much like LINK_SPEC. The
1.8013 ++difference between the two is that STARTFILE_SPEC is used at
1.8014 ++the very beginning of the command given to the linker.
1.8015 ++
1.8016 ++If this macro is not defined, a default is provided that loads the
1.8017 ++standard C startup file from the usual place. See gcc.c.
1.8018 ++*/
1.8019 ++#undef STARTFILE_SPEC
1.8020 ++#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
1.8021 ++
1.8022 ++#undef LINK_SPEC
1.8023 ++#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}"
1.8024 ++
1.8025 ++
1.8026 ++/*
1.8027 ++Another C string constant used much like LINK_SPEC. The
1.8028 ++difference between the two is that ENDFILE_SPEC is used at
1.8029 ++the very end of the command given to the linker.
1.8030 ++
1.8031 ++Do not define this macro if it does not need to do anything.
1.8032 ++*/
1.8033 ++#undef ENDFILE_SPEC
1.8034 ++#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
1.8035 ++
1.8036 ++
1.8037 ++/* Target CPU builtins. */
1.8038 ++#define TARGET_CPU_CPP_BUILTINS() \
1.8039 ++ do \
1.8040 ++ { \
1.8041 ++ builtin_define ("__avr32__"); \
1.8042 ++ builtin_define ("__AVR32__"); \
1.8043 ++ builtin_define ("__AVR32_ELF__"); \
1.8044 ++ builtin_define (avr32_part->macro); \
1.8045 ++ builtin_define (avr32_arch->macro); \
1.8046 ++ if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
1.8047 ++ builtin_define ("__AVR32_AVR32A__"); \
1.8048 ++ else \
1.8049 ++ builtin_define ("__AVR32_AVR32B__"); \
1.8050 ++ if (TARGET_UNALIGNED_WORD) \
1.8051 ++ builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
1.8052 ++ if (TARGET_SIMD) \
1.8053 ++ builtin_define ("__AVR32_HAS_SIMD__"); \
1.8054 ++ if (TARGET_DSP) \
1.8055 ++ builtin_define ("__AVR32_HAS_DSP__"); \
1.8056 ++ if (TARGET_RMW) \
1.8057 ++ builtin_define ("__AVR32_HAS_RMW__"); \
1.8058 ++ if (TARGET_BRANCH_PRED) \
1.8059 ++ builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
1.8060 ++ if (TARGET_FAST_FLOAT) \
1.8061 ++ builtin_define ("__AVR32_FAST_FLOAT__"); \
1.8062 ++ } \
1.8063 ++ while (0)
1.8064 +--- /dev/null
1.8065 ++++ b/gcc/config/avr32/avr32.h
1.8066 +@@ -0,0 +1,3347 @@
1.8067 ++/*
1.8068 ++ Definitions of target machine for AVR32.
1.8069 ++ Copyright 2003-2006 Atmel Corporation.
1.8070 ++
1.8071 ++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.8072 ++ Initial porting by Anders Ådland.
1.8073 ++
1.8074 ++ This file is part of GCC.
1.8075 ++
1.8076 ++ This program is free software; you can redistribute it and/or modify
1.8077 ++ it under the terms of the GNU General Public License as published by
1.8078 ++ the Free Software Foundation; either version 2 of the License, or
1.8079 ++ (at your option) any later version.
1.8080 ++
1.8081 ++ This program is distributed in the hope that it will be useful,
1.8082 ++ but WITHOUT ANY WARRANTY; without even the implied warranty of
1.8083 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.8084 ++ GNU General Public License for more details.
1.8085 ++
1.8086 ++ You should have received a copy of the GNU General Public License
1.8087 ++ along with this program; if not, write to the Free Software
1.8088 ++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
1.8089 ++
1.8090 ++#ifndef GCC_AVR32_H
1.8091 ++#define GCC_AVR32_H
1.8092 ++
1.8093 ++
1.8094 ++#ifndef OBJECT_FORMAT_ELF
1.8095 ++#error avr32.h included before elfos.h
1.8096 ++#endif
1.8097 ++
1.8098 ++#ifndef LOCAL_LABEL_PREFIX
1.8099 ++#define LOCAL_LABEL_PREFIX "."
1.8100 ++#endif
1.8101 ++
1.8102 ++#ifndef SUBTARGET_CPP_SPEC
1.8103 ++#define SUBTARGET_CPP_SPEC "-D__ELF__"
1.8104 ++#endif
1.8105 ++
1.8106 ++
1.8107 ++extern struct rtx_def *avr32_compare_op0;
1.8108 ++extern struct rtx_def *avr32_compare_op1;
1.8109 ++
1.8110 ++
1.8111 ++extern struct rtx_def *avr32_acc_cache;
1.8112 ++
1.8113 ++/* cache instruction op5 codes */
1.8114 ++#define AVR32_CACHE_INVALIDATE_ICACHE 1
1.8115 ++
1.8116 ++/* These bits describe the different types of function supported
1.8117 ++ by the AVR32 backend. They are exclusive. ie a function cannot be both a
1.8118 ++ normal function and an interworked function, for example. Knowing the
1.8119 ++ type of a function is important for determining its prologue and
1.8120 ++ epilogue sequences.
1.8121 ++ Note value 7 is currently unassigned. Also note that the interrupt
1.8122 ++ function types all have bit 2 set, so that they can be tested for easily.
1.8123 ++ Note that 0 is deliberately chosen for AVR32_FT_UNKNOWN so that when the
1.8124 ++ machine_function structure is initialized (to zero) func_type will
1.8125 ++ default to unknown. This will force the first use of avr32_current_func_type
1.8126 ++ to call avr32_compute_func_type. */
1.8127 ++#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined.
1.8128 ++ */
1.8129 ++#define AVR32_FT_NORMAL 1 /* Your normal, straightforward
1.8130 ++ function. */
1.8131 ++#define AVR32_FT_ACALL 2 /* An acall function. */
1.8132 ++#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */
1.8133 ++#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */
1.8134 ++#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */
1.8135 ++#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */
1.8136 ++
1.8137 ++#define AVR32_FT_TYPE_MASK ((1 << 3) - 1)
1.8138 ++
1.8139 ++/* In addition functions can have several type modifiers,
1.8140 ++ outlined by these bit masks: */
1.8141 ++#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR
1.8142 ++ and above. */
1.8143 ++#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */
1.8144 ++#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */
1.8145 ++#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another
1.8146 ++ func. */
1.8147 ++
1.8148 ++/* Some macros to test these flags. */
1.8149 ++#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK)
1.8150 ++#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT)
1.8151 ++#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE)
1.8152 ++#define IS_NAKED(t) (t & AVR32_FT_NAKED)
1.8153 ++#define IS_NESTED(t) (t & AVR32_FT_NESTED)
1.8154 ++
1.8155 ++#define SYMBOL_FLAG_RMW_ADDR_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
1.8156 ++#define SYMBOL_REF_RMW_ADDR(RTX) \
1.8157 ++ ((SYMBOL_REF_FLAGS (RTX) & (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT)) != 0)
1.8158 ++
1.8159 ++
1.8160 ++typedef struct minipool_labels
1.8161 ++GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
1.8162 ++{
1.8163 ++ rtx label;
1.8164 ++ struct minipool_labels *prev;
1.8165 ++ struct minipool_labels *next;
1.8166 ++} minipool_labels;
1.8167 ++
1.8168 ++/* A C structure for machine-specific, per-function data.
1.8169 ++ This is added to the cfun structure. */
1.8170 ++
1.8171 ++typedef struct machine_function
1.8172 ++GTY (())
1.8173 ++{
1.8174 ++ /* Records the type of the current function. */
1.8175 ++ unsigned long func_type;
1.8176 ++ /* List of minipool labels, use for checking if code label is valid in a
1.8177 ++ memory expression */
1.8178 ++ minipool_labels *minipool_label_head;
1.8179 ++ minipool_labels *minipool_label_tail;
1.8180 ++ int ifcvt_after_reload;
1.8181 ++} machine_function;
1.8182 ++
1.8183 ++/* Initialize data used by insn expanders. This is called from insn_emit,
1.8184 ++ once for every function before code is generated. */
1.8185 ++#define INIT_EXPANDERS avr32_init_expanders ()
1.8186 ++
1.8187 ++/******************************************************************************
1.8188 ++ * SPECS
1.8189 ++ *****************************************************************************/
1.8190 ++
1.8191 ++#ifndef ASM_SPEC
1.8192 ++#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}"
1.8193 ++#endif
1.8194 ++
1.8195 ++#ifndef MULTILIB_DEFAULTS
1.8196 ++#define MULTILIB_DEFAULTS { "march=ap", "" }
1.8197 ++#endif
1.8198 ++
1.8199 ++/******************************************************************************
1.8200 ++ * Run-time Target Specification
1.8201 ++ *****************************************************************************/
1.8202 ++#ifndef TARGET_VERSION
1.8203 ++#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
1.8204 ++#endif
1.8205 ++
1.8206 ++
1.8207 ++/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
1.8208 ++enum part_type
1.8209 ++{
1.8210 ++ PART_TYPE_AVR32_NONE,
1.8211 ++ PART_TYPE_AVR32_AP7000,
1.8212 ++ PART_TYPE_AVR32_AP7001,
1.8213 ++ PART_TYPE_AVR32_AP7002,
1.8214 ++ PART_TYPE_AVR32_AP7200,
1.8215 ++ PART_TYPE_AVR32_UC3A0128,
1.8216 ++ PART_TYPE_AVR32_UC3A0256,
1.8217 ++ PART_TYPE_AVR32_UC3A0512,
1.8218 ++ PART_TYPE_AVR32_UC3A0512ES,
1.8219 ++ PART_TYPE_AVR32_UC3A1128,
1.8220 ++ PART_TYPE_AVR32_UC3A1256,
1.8221 ++ PART_TYPE_AVR32_UC3A1512,
1.8222 ++ PART_TYPE_AVR32_UC3A1512ES,
1.8223 ++ PART_TYPE_AVR32_UC3A3REVD,
1.8224 ++ PART_TYPE_AVR32_UC3A364,
1.8225 ++ PART_TYPE_AVR32_UC3A364S,
1.8226 ++ PART_TYPE_AVR32_UC3A3128,
1.8227 ++ PART_TYPE_AVR32_UC3A3128S,
1.8228 ++ PART_TYPE_AVR32_UC3A3256,
1.8229 ++ PART_TYPE_AVR32_UC3A3256S,
1.8230 ++ PART_TYPE_AVR32_UC3B064,
1.8231 ++ PART_TYPE_AVR32_UC3B0128,
1.8232 ++ PART_TYPE_AVR32_UC3B0256,
1.8233 ++ PART_TYPE_AVR32_UC3B0256ES,
1.8234 ++ PART_TYPE_AVR32_UC3B164,
1.8235 ++ PART_TYPE_AVR32_UC3B1128,
1.8236 ++ PART_TYPE_AVR32_UC3B1256,
1.8237 ++ PART_TYPE_AVR32_UC3B1256ES
1.8238 ++};
1.8239 ++
1.8240 ++/* Microarchitectures. */
1.8241 ++enum microarchitecture_type
1.8242 ++{
1.8243 ++ UARCH_TYPE_AVR32A,
1.8244 ++ UARCH_TYPE_AVR32B,
1.8245 ++ UARCH_TYPE_NONE
1.8246 ++};
1.8247 ++
1.8248 ++/* Architectures types which specifies the pipeline.
1.8249 ++ Keep this in sync with avr32_arch_types in avr32.c
1.8250 ++ and the pipeline attribute in avr32.md */
1.8251 ++enum architecture_type
1.8252 ++{
1.8253 ++ ARCH_TYPE_AVR32_AP,
1.8254 ++ ARCH_TYPE_AVR32_UCR1,
1.8255 ++ ARCH_TYPE_AVR32_UCR2,
1.8256 ++ ARCH_TYPE_AVR32_UCR2NOMUL,
1.8257 ++ ARCH_TYPE_AVR32_NONE
1.8258 ++};
1.8259 ++
1.8260 ++/* Flag specifying if the cpu has support for DSP instructions.*/
1.8261 ++#define FLAG_AVR32_HAS_DSP (1 << 0)
1.8262 ++/* Flag specifying if the cpu has support for Read-Modify-Write
1.8263 ++ instructions.*/
1.8264 ++#define FLAG_AVR32_HAS_RMW (1 << 1)
1.8265 ++/* Flag specifying if the cpu has support for SIMD instructions. */
1.8266 ++#define FLAG_AVR32_HAS_SIMD (1 << 2)
1.8267 ++/* Flag specifying if the cpu has support for unaligned memory word access. */
1.8268 ++#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
1.8269 ++/* Flag specifying if the cpu has support for branch prediction. */
1.8270 ++#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
1.8271 ++/* Flag specifying if the cpu has support for a return stack. */
1.8272 ++#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5)
1.8273 ++/* Flag specifying if the cpu has caches. */
1.8274 ++#define FLAG_AVR32_HAS_CACHES (1 << 6)
1.8275 ++/* Flag specifying if the cpu has support for v2 insns. */
1.8276 ++#define FLAG_AVR32_HAS_V2_INSNS (1 << 7)
1.8277 ++/* Flag specifying that the cpu has buggy mul insns. */
1.8278 ++#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8)
1.8279 ++
1.8280 ++/* Structure for holding information about different avr32 CPUs/parts */
1.8281 ++struct part_type_s
1.8282 ++{
1.8283 ++ const char *const name;
1.8284 ++ enum part_type part_type;
1.8285 ++ enum architecture_type arch_type;
1.8286 ++ /* Must lie outside user's namespace. NULL == no macro. */
1.8287 ++ const char *const macro;
1.8288 ++};
1.8289 ++
1.8290 ++/* Structure for holding information about different avr32 pipeline
1.8291 ++ architectures. */
1.8292 ++struct arch_type_s
1.8293 ++{
1.8294 ++ const char *const name;
1.8295 ++ enum architecture_type arch_type;
1.8296 ++ enum microarchitecture_type uarch_type;
1.8297 ++ const unsigned long feature_flags;
1.8298 ++ /* Must lie outside user's namespace. NULL == no macro. */
1.8299 ++ const char *const macro;
1.8300 ++};
1.8301 ++
1.8302 ++extern const struct part_type_s *avr32_part;
1.8303 ++extern const struct arch_type_s *avr32_arch;
1.8304 ++
1.8305 ++#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
1.8306 ++#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
1.8307 ++#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
1.8308 ++#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
1.8309 ++#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
1.8310 ++#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK)
1.8311 ++#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS)
1.8312 ++#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES)
1.8313 ++#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS)
1.8314 ++#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP)
1.8315 ++#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1)
1.8316 ++#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2)
1.8317 ++#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2)
1.8318 ++#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
1.8319 ++#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B)
1.8320 ++
1.8321 ++#define CAN_DEBUG_WITHOUT_FP
1.8322 ++
1.8323 ++
1.8324 ++
1.8325 ++
1.8326 ++/******************************************************************************
1.8327 ++ * Storage Layout
1.8328 ++ *****************************************************************************/
1.8329 ++
1.8330 ++/*
1.8331 ++Define this macro to have the value 1 if the most significant bit in a
1.8332 ++byte has the lowest number; otherwise define it to have the value zero.
1.8333 ++This means that bit-field instructions count from the most significant
1.8334 ++bit. If the machine has no bit-field instructions, then this must still
1.8335 ++be defined, but it doesn't matter which value it is defined to. This
1.8336 ++macro need not be a constant.
1.8337 ++
1.8338 ++This macro does not affect the way structure fields are packed into
1.8339 ++bytes or words; that is controlled by BYTES_BIG_ENDIAN.
1.8340 ++*/
1.8341 ++#define BITS_BIG_ENDIAN 0
1.8342 ++
1.8343 ++/*
1.8344 ++Define this macro to have the value 1 if the most significant byte in a
1.8345 ++word has the lowest number. This macro need not be a constant.
1.8346 ++*/
1.8347 ++/*
1.8348 ++ Data is stored in a big-endian way.
1.8349 ++*/
1.8350 ++#define BYTES_BIG_ENDIAN 1
1.8351 ++
1.8352 ++/*
1.8353 ++Define this macro to have the value 1 if, in a multiword object, the
1.8354 ++most significant word has the lowest number. This applies to both
1.8355 ++memory locations and registers; GCC fundamentally assumes that the
1.8356 ++order of words in memory is the same as the order in registers. This
1.8357 ++macro need not be a constant.
1.8358 ++*/
1.8359 ++/*
1.8360 ++ Data is stored in a big-endian way.
1.8361 ++*/
1.8362 ++#define WORDS_BIG_ENDIAN 1
1.8363 ++
1.8364 ++/*
1.8365 ++Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a
1.8366 ++constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
1.8367 ++used only when compiling libgcc2.c. Typically the value will be set
1.8368 ++based on preprocessor defines.
1.8369 ++*/
1.8370 ++#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
1.8371 ++
1.8372 ++/*
1.8373 ++Define this macro to have the value 1 if DFmode, XFmode or
1.8374 ++TFmode floating point numbers are stored in memory with the word
1.8375 ++containing the sign bit at the lowest address; otherwise define it to
1.8376 ++have the value 0. This macro need not be a constant.
1.8377 ++
1.8378 ++You need not define this macro if the ordering is the same as for
1.8379 ++multi-word integers.
1.8380 ++*/
1.8381 ++/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
1.8382 ++
1.8383 ++/*
1.8384 ++Define this macro to be the number of bits in an addressable storage
1.8385 ++unit (byte); normally 8.
1.8386 ++*/
1.8387 ++#define BITS_PER_UNIT 8
1.8388 ++
1.8389 ++/*
1.8390 ++Number of bits in a word; normally 32.
1.8391 ++*/
1.8392 ++#define BITS_PER_WORD 32
1.8393 ++
1.8394 ++/*
1.8395 ++Maximum number of bits in a word. If this is undefined, the default is
1.8396 ++BITS_PER_WORD. Otherwise, it is the constant value that is the
1.8397 ++largest value that BITS_PER_WORD can have at run-time.
1.8398 ++*/
1.8399 ++/* MAX_BITS_PER_WORD not defined*/
1.8400 ++
1.8401 ++/*
1.8402 ++Number of storage units in a word; normally 4.
1.8403 ++*/
1.8404 ++#define UNITS_PER_WORD 4
1.8405 ++
1.8406 ++/*
1.8407 ++Minimum number of units in a word. If this is undefined, the default is
1.8408 ++UNITS_PER_WORD. Otherwise, it is the constant value that is the
1.8409 ++smallest value that UNITS_PER_WORD can have at run-time.
1.8410 ++*/
1.8411 ++/* MIN_UNITS_PER_WORD not defined */
1.8412 ++
1.8413 ++/*
1.8414 ++Width of a pointer, in bits. You must specify a value no wider than the
1.8415 ++width of Pmode. If it is not equal to the width of Pmode,
1.8416 ++you must define POINTERS_EXTEND_UNSIGNED.
1.8417 ++*/
1.8418 ++#define POINTER_SIZE 32
1.8419 ++
1.8420 ++/*
1.8421 ++A C expression whose value is greater than zero if pointers that need to be
1.8422 ++extended from being POINTER_SIZE bits wide to Pmode are to
1.8423 ++be zero-extended and zero if they are to be sign-extended. If the value
1.8424 ++is less than zero then there must be a "ptr_extend" instruction that
1.8425 ++extends a pointer from POINTER_SIZE to Pmode.
1.8426 ++
1.8427 ++You need not define this macro if the POINTER_SIZE is equal
1.8428 ++to the width of Pmode.
1.8429 ++*/
1.8430 ++/* #define POINTERS_EXTEND_UNSIGNED */
1.8431 ++
1.8432 ++/*
1.8433 ++A Macro to update M and UNSIGNEDP when an object whose type
1.8434 ++is TYPE and which has the specified mode and signedness is to be
1.8435 ++stored in a register. This macro is only called when TYPE is a
1.8436 ++scalar type.
1.8437 ++
1.8438 ++On most RISC machines, which only have operations that operate on a full
1.8439 ++register, define this macro to set M to word_mode if
1.8440 ++M is an integer mode narrower than BITS_PER_WORD. In most
1.8441 ++cases, only integer modes should be widened because wider-precision
1.8442 ++floating-point operations are usually more expensive than their narrower
1.8443 ++counterparts.
1.8444 ++
1.8445 ++For most machines, the macro definition does not change UNSIGNEDP.
1.8446 ++However, some machines, have instructions that preferentially handle
1.8447 ++either signed or unsigned quantities of certain modes. For example, on
1.8448 ++the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
1.8449 ++sign-extend the result to 64 bits. On such machines, set
1.8450 ++UNSIGNEDP according to which kind of extension is more efficient.
1.8451 ++
1.8452 ++Do not define this macro if it would never modify M.
1.8453 ++*/
1.8454 ++#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \
1.8455 ++ { \
1.8456 ++ if (GET_MODE_CLASS (M) == MODE_INT \
1.8457 ++ && GET_MODE_SIZE (M) < 4) \
1.8458 ++ { \
1.8459 ++ if (M == QImode) \
1.8460 ++ UNSIGNEDP = 1; \
1.8461 ++ else if (M == SImode) \
1.8462 ++ UNSIGNEDP = 0; \
1.8463 ++ (M) = SImode; \
1.8464 ++ } \
1.8465 ++ }
1.8466 ++
1.8467 ++#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \
1.8468 ++ { \
1.8469 ++ if (GET_MODE_CLASS (M) == MODE_INT \
1.8470 ++ && GET_MODE_SIZE (M) < 4) \
1.8471 ++ { \
1.8472 ++ (M) = SImode; \
1.8473 ++ } \
1.8474 ++ }
1.8475 ++
1.8476 ++/* Define if operations between registers always perform the operation
1.8477 ++ on the full register even if a narrower mode is specified. */
1.8478 ++#define WORD_REGISTER_OPERATIONS
1.8479 ++
1.8480 ++/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
1.8481 ++ will either zero-extend or sign-extend. The value of this macro should
1.8482 ++ be the code that says which one of the two operations is implicitly
1.8483 ++ done, UNKNOWN if not known. */
1.8484 ++#define LOAD_EXTEND_OP(MODE) \
1.8485 ++ (((MODE) == QImode) ? ZERO_EXTEND \
1.8486 ++ : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
1.8487 ++
1.8488 ++
1.8489 ++/*
1.8490 ++Define this macro if the promotion described by PROMOTE_MODE
1.8491 ++should only be performed for outgoing function arguments or
1.8492 ++function return values, as specified by PROMOTE_FUNCTION_ARGS
1.8493 ++and PROMOTE_FUNCTION_RETURN, respectively.
1.8494 ++*/
1.8495 ++/* #define PROMOTE_FOR_CALL_ONLY */
1.8496 ++
1.8497 ++/*
1.8498 ++Normal alignment required for function parameters on the stack, in
1.8499 ++bits. All stack parameters receive at least this much alignment
1.8500 ++regardless of data type. On most machines, this is the same as the
1.8501 ++size of an integer.
1.8502 ++*/
1.8503 ++#define PARM_BOUNDARY 32
1.8504 ++
1.8505 ++/*
1.8506 ++Define this macro to the minimum alignment enforced by hardware for the
1.8507 ++stack pointer on this machine. The definition is a C expression for the
1.8508 ++desired alignment (measured in bits). This value is used as a default
1.8509 ++if PREFERRED_STACK_BOUNDARY is not defined. On most machines,
1.8510 ++this should be the same as PARM_BOUNDARY.
1.8511 ++*/
1.8512 ++#define STACK_BOUNDARY 32
1.8513 ++
1.8514 ++/*
1.8515 ++Define this macro if you wish to preserve a certain alignment for the
1.8516 ++stack pointer, greater than what the hardware enforces. The definition
1.8517 ++is a C expression for the desired alignment (measured in bits). This
1.8518 ++macro must evaluate to a value equal to or larger than
1.8519 ++STACK_BOUNDARY.
1.8520 ++*/
1.8521 ++#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
1.8522 ++
1.8523 ++/*
1.8524 ++Alignment required for a function entry point, in bits.
1.8525 ++*/
1.8526 ++#define FUNCTION_BOUNDARY 16
1.8527 ++
1.8528 ++/*
1.8529 ++Biggest alignment that any data type can require on this machine, in bits.
1.8530 ++*/
1.8531 ++#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
1.8532 ++
1.8533 ++/*
1.8534 ++If defined, the smallest alignment, in bits, that can be given to an
1.8535 ++object that can be referenced in one operation, without disturbing any
1.8536 ++nearby object. Normally, this is BITS_PER_UNIT, but may be larger
1.8537 ++on machines that don't have byte or half-word store operations.
1.8538 ++*/
1.8539 ++#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
1.8540 ++
1.8541 ++
1.8542 ++/*
1.8543 ++An integer expression for the size in bits of the largest integer machine mode that
1.8544 ++should actually be used. All integer machine modes of this size or smaller can be
1.8545 ++used for structures and unions with the appropriate sizes. If this macro is undefined,
1.8546 ++GET_MODE_BITSIZE (DImode) is assumed.*/
1.8547 ++#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
1.8548 ++
1.8549 ++
1.8550 ++/*
1.8551 ++If defined, a C expression to compute the alignment given to a constant
1.8552 ++that is being placed in memory. CONSTANT is the constant and
1.8553 ++BASIC_ALIGN is the alignment that the object would ordinarily
1.8554 ++have. The value of this macro is used instead of that alignment to
1.8555 ++align the object.
1.8556 ++
1.8557 ++If this macro is not defined, then BASIC_ALIGN is used.
1.8558 ++
1.8559 ++The typical use of this macro is to increase alignment for string
1.8560 ++constants to be word aligned so that strcpy calls that copy
1.8561 ++constants can be done inline.
1.8562 ++*/
1.8563 ++#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
1.8564 ++ ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
1.8565 ++
1.8566 ++/* Try to align string to a word. */
1.8567 ++#define DATA_ALIGNMENT(TYPE, ALIGN) \
1.8568 ++ ({(TREE_CODE (TYPE) == ARRAY_TYPE \
1.8569 ++ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
1.8570 ++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
1.8571 ++
1.8572 ++/* Try to align local store strings to a word. */
1.8573 ++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
1.8574 ++ ({(TREE_CODE (TYPE) == ARRAY_TYPE \
1.8575 ++ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
1.8576 ++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
1.8577 ++
1.8578 ++/*
1.8579 ++Define this macro to be the value 1 if instructions will fail to work
1.8580 ++if given data not on the nominal alignment. If instructions will merely
1.8581 ++go slower in that case, define this macro as 0.
1.8582 ++*/
1.8583 ++#define STRICT_ALIGNMENT 1
1.8584 ++
1.8585 ++/*
1.8586 ++Define this if you wish to imitate the way many other C compilers handle
1.8587 ++alignment of bit-fields and the structures that contain them.
1.8588 ++
1.8589 ++The behavior is that the type written for a bit-field (int,
1.8590 ++short, or other integer type) imposes an alignment for the
1.8591 ++entire structure, as if the structure really did contain an ordinary
1.8592 ++field of that type. In addition, the bit-field is placed within the
1.8593 ++structure so that it would fit within such a field, not crossing a
1.8594 ++boundary for it.
1.8595 ++
1.8596 ++Thus, on most machines, a bit-field whose type is written as int
1.8597 ++would not cross a four-byte boundary, and would force four-byte
1.8598 ++alignment for the whole structure. (The alignment used may not be four
1.8599 ++bytes; it is controlled by the other alignment parameters.)
1.8600 ++
1.8601 ++If the macro is defined, its definition should be a C expression;
1.8602 ++a nonzero value for the expression enables this behavior.
1.8603 ++
1.8604 ++Note that if this macro is not defined, or its value is zero, some
1.8605 ++bit-fields may cross more than one alignment boundary. The compiler can
1.8606 ++support such references if there are insv, extv, and
1.8607 ++extzv insns that can directly reference memory.
1.8608 ++
1.8609 ++The other known way of making bit-fields work is to define
1.8610 ++STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
1.8611 ++Then every structure can be accessed with fullwords.
1.8612 ++
1.8613 ++Unless the machine has bit-field instructions or you define
1.8614 ++STRUCTURE_SIZE_BOUNDARY that way, you must define
1.8615 ++PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
1.8616 ++
1.8617 ++If your aim is to make GCC use the same conventions for laying out
1.8618 ++bit-fields as are used by another compiler, here is how to investigate
1.8619 ++what the other compiler does. Compile and run this program:
1.8620 ++
1.8621 ++struct foo1
1.8622 ++{
1.8623 ++ char x;
1.8624 ++ char :0;
1.8625 ++ char y;
1.8626 ++};
1.8627 ++
1.8628 ++struct foo2
1.8629 ++{
1.8630 ++ char x;
1.8631 ++ int :0;
1.8632 ++ char y;
1.8633 ++};
1.8634 ++
1.8635 ++main ()
1.8636 ++{
1.8637 ++ printf ("Size of foo1 is %d\n",
1.8638 ++ sizeof (struct foo1));
1.8639 ++ printf ("Size of foo2 is %d\n",
1.8640 ++ sizeof (struct foo2));
1.8641 ++ exit (0);
1.8642 ++}
1.8643 ++
1.8644 ++If this prints 2 and 5, then the compiler's behavior is what you would
1.8645 ++get from PCC_BITFIELD_TYPE_MATTERS.
1.8646 ++*/
1.8647 ++#define PCC_BITFIELD_TYPE_MATTERS 1
1.8648 ++
1.8649 ++
1.8650 ++/******************************************************************************
1.8651 ++ * Layout of Source Language Data Types
1.8652 ++ *****************************************************************************/
1.8653 ++
1.8654 ++/*
1.8655 ++A C expression for the size in bits of the type int on the
1.8656 ++target machine. If you don't define this, the default is one word.
1.8657 ++*/
1.8658 ++#define INT_TYPE_SIZE 32
1.8659 ++
1.8660 ++/*
1.8661 ++A C expression for the size in bits of the type short on the
1.8662 ++target machine. If you don't define this, the default is half a word. (If
1.8663 ++this would be less than one storage unit, it is rounded up to one unit.)
1.8664 ++*/
1.8665 ++#define SHORT_TYPE_SIZE 16
1.8666 ++
1.8667 ++/*
1.8668 ++A C expression for the size in bits of the type long on the
1.8669 ++target machine. If you don't define this, the default is one word.
1.8670 ++*/
1.8671 ++#define LONG_TYPE_SIZE 32
1.8672 ++
1.8673 ++
1.8674 ++/*
1.8675 ++A C expression for the size in bits of the type long long on the
1.8676 ++target machine. If you don't define this, the default is two
1.8677 ++words. If you want to support GNU Ada on your machine, the value of this
1.8678 ++macro must be at least 64.
1.8679 ++*/
1.8680 ++#define LONG_LONG_TYPE_SIZE 64
1.8681 ++
1.8682 ++/*
1.8683 ++A C expression for the size in bits of the type char on the
1.8684 ++target machine. If you don't define this, the default is
1.8685 ++BITS_PER_UNIT.
1.8686 ++*/
1.8687 ++#define CHAR_TYPE_SIZE 8
1.8688 ++
1.8689 ++
1.8690 ++/*
1.8691 ++A C expression for the size in bits of the C++ type bool and
1.8692 ++C99 type _Bool on the target machine. If you don't define
1.8693 ++this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
1.8694 ++*/
1.8695 ++#define BOOL_TYPE_SIZE 8
1.8696 ++
1.8697 ++
1.8698 ++/*
1.8699 ++An expression whose value is 1 or 0, according to whether the type
1.8700 ++char should be signed or unsigned by default. The user can
1.8701 ++always override this default with the options -fsigned-char
1.8702 ++and -funsigned-char.
1.8703 ++*/
1.8704 ++/* We are using unsigned char */
1.8705 ++#define DEFAULT_SIGNED_CHAR 0
1.8706 ++
1.8707 ++
1.8708 ++/*
1.8709 ++A C expression for a string describing the name of the data type to use
1.8710 ++for size values. The typedef name size_t is defined using the
1.8711 ++contents of the string.
1.8712 ++
1.8713 ++The string can contain more than one keyword. If so, separate them with
1.8714 ++spaces, and write first any length keyword, then unsigned if
1.8715 ++appropriate, and finally int. The string must exactly match one
1.8716 ++of the data type names defined in the function
1.8717 ++init_decl_processing in the file c-decl.c. You may not
1.8718 ++omit int or change the order - that would cause the compiler to
1.8719 ++crash on startup.
1.8720 ++
1.8721 ++If you don't define this macro, the default is "long unsigned int".
1.8722 ++*/
1.8723 ++#define SIZE_TYPE "long unsigned int"
1.8724 ++
1.8725 ++/*
1.8726 ++A C expression for a string describing the name of the data type to use
1.8727 ++for the result of subtracting two pointers. The typedef name
1.8728 ++ptrdiff_t is defined using the contents of the string. See
1.8729 ++SIZE_TYPE above for more information.
1.8730 ++
1.8731 ++If you don't define this macro, the default is "long int".
1.8732 ++*/
1.8733 ++#define PTRDIFF_TYPE "long int"
1.8734 ++
1.8735 ++
1.8736 ++/*
1.8737 ++A C expression for the size in bits of the data type for wide
1.8738 ++characters. This is used in cpp, which cannot make use of
1.8739 ++WCHAR_TYPE.
1.8740 ++*/
1.8741 ++#define WCHAR_TYPE_SIZE 32
1.8742 ++
1.8743 ++
1.8744 ++/*
1.8745 ++A C expression for a string describing the name of the data type to
1.8746 ++use for wide characters passed to printf and returned from
1.8747 ++getwc. The typedef name wint_t is defined using the
1.8748 ++contents of the string. See SIZE_TYPE above for more
1.8749 ++information.
1.8750 ++
1.8751 ++If you don't define this macro, the default is "unsigned int".
1.8752 ++*/
1.8753 ++#define WINT_TYPE "unsigned int"
1.8754 ++
1.8755 ++/*
1.8756 ++A C expression for a string describing the name of the data type that
1.8757 ++can represent any value of any standard or extended signed integer type.
1.8758 ++The typedef name intmax_t is defined using the contents of the
1.8759 ++string. See SIZE_TYPE above for more information.
1.8760 ++
1.8761 ++If you don't define this macro, the default is the first of
1.8762 ++"int", "long int", or "long long int" that has as
1.8763 ++much precision as long long int.
1.8764 ++*/
1.8765 ++#define INTMAX_TYPE "long long int"
1.8766 ++
1.8767 ++/*
1.8768 ++A C expression for a string describing the name of the data type that
1.8769 ++can represent any value of any standard or extended unsigned integer
1.8770 ++type. The typedef name uintmax_t is defined using the contents
1.8771 ++of the string. See SIZE_TYPE above for more information.
1.8772 ++
1.8773 ++If you don't define this macro, the default is the first of
1.8774 ++"unsigned int", "long unsigned int", or "long long unsigned int"
1.8775 ++that has as much precision as long long unsigned int.
1.8776 ++*/
1.8777 ++#define UINTMAX_TYPE "long long unsigned int"
1.8778 ++
1.8779 ++
1.8780 ++/******************************************************************************
1.8781 ++ * Register Usage
1.8782 ++ *****************************************************************************/
1.8783 ++
1.8784 ++/* Convert from gcc internal register number to register number
1.8785 ++ used in assembly code */
1.8786 ++#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
1.8787 ++#define ASM_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
1.8788 ++
1.8789 ++/* Convert between register number used in assembly to gcc
1.8790 ++ internal register number */
1.8791 ++#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
1.8792 ++#define INTERNAL_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
1.8793 ++
1.8794 ++/** Basic Characteristics of Registers **/
1.8795 ++
1.8796 ++/*
1.8797 ++Number of hardware registers known to the compiler. They receive
1.8798 ++numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
1.8799 ++pseudo register's number really is assigned the number
1.8800 ++FIRST_PSEUDO_REGISTER.
1.8801 ++*/
1.8802 ++#define FIRST_PSEUDO_REGISTER (LAST_FP_REGNUM + 1)
1.8803 ++
1.8804 ++#define FIRST_REGNUM 0
1.8805 ++#define LAST_REGNUM 15
1.8806 ++#define NUM_FP_REGS 16
1.8807 ++#define FIRST_FP_REGNUM 16
1.8808 ++#define LAST_FP_REGNUM (16+NUM_FP_REGS-1)
1.8809 ++
1.8810 ++/*
1.8811 ++An initializer that says which registers are used for fixed purposes
1.8812 ++all throughout the compiled code and are therefore not available for
1.8813 ++general allocation. These would include the stack pointer, the frame
1.8814 ++pointer (except on machines where that can be used as a general
1.8815 ++register when no frame pointer is needed), the program counter on
1.8816 ++machines where that is considered one of the addressable registers,
1.8817 ++and any other numbered register with a standard use.
1.8818 ++
1.8819 ++This information is expressed as a sequence of numbers, separated by
1.8820 ++commas and surrounded by braces. The nth number is 1 if
1.8821 ++register n is fixed, 0 otherwise.
1.8822 ++
1.8823 ++The table initialized from this macro, and the table initialized by
1.8824 ++the following one, may be overridden at run time either automatically,
1.8825 ++by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
1.8826 ++the user with the command options -ffixed-[reg],
1.8827 ++-fcall-used-[reg] and -fcall-saved-[reg].
1.8828 ++*/
1.8829 ++
1.8830 ++/* The internal gcc register numbers are reversed
1.8831 ++ compared to the real register numbers since
1.8832 ++ gcc expects data types stored over multiple
1.8833 ++ registers in the register file to be big endian
1.8834 ++ if the memory layout is big endian. But this
1.8835 ++ is not the case for avr32 so we fake a big
1.8836 ++ endian register file. */
1.8837 ++
1.8838 ++#define FIXED_REGISTERS { \
1.8839 ++ 1, /* Program Counter */ \
1.8840 ++ 0, /* Link Register */ \
1.8841 ++ 1, /* Stack Pointer */ \
1.8842 ++ 0, /* r12 */ \
1.8843 ++ 0, /* r11 */ \
1.8844 ++ 0, /* r10 */ \
1.8845 ++ 0, /* r9 */ \
1.8846 ++ 0, /* r8 */ \
1.8847 ++ 0, /* r7 */ \
1.8848 ++ 0, /* r6 */ \
1.8849 ++ 0, /* r5 */ \
1.8850 ++ 0, /* r4 */ \
1.8851 ++ 0, /* r3 */ \
1.8852 ++ 0, /* r2 */ \
1.8853 ++ 0, /* r1 */ \
1.8854 ++ 0, /* r0 */ \
1.8855 ++ 0, /* f15 */ \
1.8856 ++ 0, /* f14 */ \
1.8857 ++ 0, /* f13 */ \
1.8858 ++ 0, /* f12 */ \
1.8859 ++ 0, /* f11 */ \
1.8860 ++ 0, /* f10 */ \
1.8861 ++ 0, /* f9 */ \
1.8862 ++ 0, /* f8 */ \
1.8863 ++ 0, /* f7 */ \
1.8864 ++ 0, /* f6 */ \
1.8865 ++ 0, /* f5 */ \
1.8866 ++ 0, /* f4 */ \
1.8867 ++ 0, /* f3 */ \
1.8868 ++ 0, /* f2*/ \
1.8869 ++ 0, /* f1 */ \
1.8870 ++ 0 /* f0 */ \
1.8871 ++}
1.8872 ++
1.8873 ++/*
1.8874 ++Like FIXED_REGISTERS but has 1 for each register that is
1.8875 ++clobbered (in general) by function calls as well as for fixed
1.8876 ++registers. This macro therefore identifies the registers that are not
1.8877 ++available for general allocation of values that must live across
1.8878 ++function calls.
1.8879 ++
1.8880 ++If a register has 0 in CALL_USED_REGISTERS, the compiler
1.8881 ++automatically saves it on function entry and restores it on function
1.8882 ++exit, if the register is used within the function.
1.8883 ++*/
1.8884 ++#define CALL_USED_REGISTERS { \
1.8885 ++ 1, /* Program Counter */ \
1.8886 ++ 0, /* Link Register */ \
1.8887 ++ 1, /* Stack Pointer */ \
1.8888 ++ 1, /* r12 */ \
1.8889 ++ 1, /* r11 */ \
1.8890 ++ 1, /* r10 */ \
1.8891 ++ 1, /* r9 */ \
1.8892 ++ 1, /* r8 */ \
1.8893 ++ 0, /* r7 */ \
1.8894 ++ 0, /* r6 */ \
1.8895 ++ 0, /* r5 */ \
1.8896 ++ 0, /* r4 */ \
1.8897 ++ 0, /* r3 */ \
1.8898 ++ 0, /* r2 */ \
1.8899 ++ 0, /* r1 */ \
1.8900 ++ 0, /* r0 */ \
1.8901 ++ 1, /* f15 */ \
1.8902 ++ 1, /* f14 */ \
1.8903 ++ 1, /* f13 */ \
1.8904 ++ 1, /* f12 */ \
1.8905 ++ 1, /* f11 */ \
1.8906 ++ 1, /* f10 */ \
1.8907 ++ 1, /* f9 */ \
1.8908 ++ 1, /* f8 */ \
1.8909 ++ 0, /* f7 */ \
1.8910 ++ 0, /* f6 */ \
1.8911 ++ 0, /* f5 */ \
1.8912 ++ 0, /* f4 */ \
1.8913 ++ 0, /* f3 */ \
1.8914 ++ 0, /* f2*/ \
1.8915 ++ 0, /* f1*/ \
1.8916 ++ 0, /* f0 */ \
1.8917 ++}
1.8918 ++
1.8919 ++/* Interrupt functions can only use registers that have already been
1.8920 ++ saved by the prologue, even if they would normally be
1.8921 ++ call-clobbered. */
1.8922 ++#define HARD_REGNO_RENAME_OK(SRC, DST) \
1.8923 ++ (! IS_INTERRUPT (cfun->machine->func_type) || \
1.8924 ++ regs_ever_live[DST])
1.8925 ++
1.8926 ++
1.8927 ++/*
1.8928 ++Zero or more C statements that may conditionally modify five variables
1.8929 ++fixed_regs, call_used_regs, global_regs,
1.8930 ++reg_names, and reg_class_contents, to take into account
1.8931 ++any dependence of these register sets on target flags. The first three
1.8932 ++of these are of type char [] (interpreted as Boolean vectors).
1.8933 ++global_regs is a const char *[], and
1.8934 ++reg_class_contents is a HARD_REG_SET. Before the macro is
1.8935 ++called, fixed_regs, call_used_regs,
1.8936 ++reg_class_contents, and reg_names have been initialized
1.8937 ++from FIXED_REGISTERS, CALL_USED_REGISTERS,
1.8938 ++REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
1.8939 ++global_regs has been cleared, and any -ffixed-[reg],
1.8940 ++-fcall-used-[reg] and -fcall-saved-[reg]
1.8941 ++command options have been applied.
1.8942 ++
1.8943 ++You need not define this macro if it has no work to do.
1.8944 ++
1.8945 ++If the usage of an entire class of registers depends on the target
1.8946 ++flags, you may indicate this to GCC by using this macro to modify
1.8947 ++fixed_regs and call_used_regs to 1 for each of the
1.8948 ++registers in the classes which should not be used by GCC. Also define
1.8949 ++the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
1.8950 ++is called with a letter for a class that shouldn't be used.
1.8951 ++
1.8952 ++ (However, if this class is not included in GENERAL_REGS and all
1.8953 ++of the insn patterns whose constraints permit this class are
1.8954 ++controlled by target switches, then GCC will automatically avoid using
1.8955 ++these registers when the target switches are opposed to them.)
1.8956 ++*/
1.8957 ++#define CONDITIONAL_REGISTER_USAGE \
1.8958 ++ do \
1.8959 ++ { \
1.8960 ++ int regno; \
1.8961 ++ \
1.8962 ++ if (TARGET_SOFT_FLOAT) \
1.8963 ++ { \
1.8964 ++ for (regno = FIRST_FP_REGNUM; \
1.8965 ++ regno <= LAST_FP_REGNUM; ++regno) \
1.8966 ++ fixed_regs[regno] = call_used_regs[regno] = 1; \
1.8967 ++ } \
1.8968 ++ if (flag_pic) \
1.8969 ++ { \
1.8970 ++ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
1.8971 ++ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
1.8972 ++ } \
1.8973 ++ } \
1.8974 ++ while (0)
1.8975 ++
1.8976 ++
1.8977 ++/*
1.8978 ++If the program counter has a register number, define this as that
1.8979 ++register number. Otherwise, do not define it.
1.8980 ++*/
1.8981 ++
1.8982 ++#define LAST_AVR32_REGNUM 16
1.8983 ++
1.8984 ++
1.8985 ++/** Order of Allocation of Registers **/
1.8986 ++
1.8987 ++/*
1.8988 ++If defined, an initializer for a vector of integers, containing the
1.8989 ++numbers of hard registers in the order in which GCC should prefer
1.8990 ++to use them (from most preferred to least).
1.8991 ++
1.8992 ++If this macro is not defined, registers are used lowest numbered first
1.8993 ++(all else being equal).
1.8994 ++
1.8995 ++One use of this macro is on machines where the highest numbered
1.8996 ++registers must always be saved and the save-multiple-registers
1.8997 ++instruction supports only sequences of consecutive registers. On such
1.8998 ++machines, define REG_ALLOC_ORDER to be an initializer that lists
1.8999 ++the highest numbered allocable register first.
1.9000 ++*/
1.9001 ++#define REG_ALLOC_ORDER \
1.9002 ++{ \
1.9003 ++ INTERNAL_REGNUM(8), \
1.9004 ++ INTERNAL_REGNUM(9), \
1.9005 ++ INTERNAL_REGNUM(10), \
1.9006 ++ INTERNAL_REGNUM(11), \
1.9007 ++ INTERNAL_REGNUM(12), \
1.9008 ++ LR_REGNUM, \
1.9009 ++ INTERNAL_REGNUM(7), \
1.9010 ++ INTERNAL_REGNUM(6), \
1.9011 ++ INTERNAL_REGNUM(5), \
1.9012 ++ INTERNAL_REGNUM(4), \
1.9013 ++ INTERNAL_REGNUM(3), \
1.9014 ++ INTERNAL_REGNUM(2), \
1.9015 ++ INTERNAL_REGNUM(1), \
1.9016 ++ INTERNAL_REGNUM(0), \
1.9017 ++ INTERNAL_FP_REGNUM(15), \
1.9018 ++ INTERNAL_FP_REGNUM(14), \
1.9019 ++ INTERNAL_FP_REGNUM(13), \
1.9020 ++ INTERNAL_FP_REGNUM(12), \
1.9021 ++ INTERNAL_FP_REGNUM(11), \
1.9022 ++ INTERNAL_FP_REGNUM(10), \
1.9023 ++ INTERNAL_FP_REGNUM(9), \
1.9024 ++ INTERNAL_FP_REGNUM(8), \
1.9025 ++ INTERNAL_FP_REGNUM(7), \
1.9026 ++ INTERNAL_FP_REGNUM(6), \
1.9027 ++ INTERNAL_FP_REGNUM(5), \
1.9028 ++ INTERNAL_FP_REGNUM(4), \
1.9029 ++ INTERNAL_FP_REGNUM(3), \
1.9030 ++ INTERNAL_FP_REGNUM(2), \
1.9031 ++ INTERNAL_FP_REGNUM(1), \
1.9032 ++ INTERNAL_FP_REGNUM(0), \
1.9033 ++ SP_REGNUM, \
1.9034 ++ PC_REGNUM \
1.9035 ++}
1.9036 ++
1.9037 ++
1.9038 ++/** How Values Fit in Registers **/
1.9039 ++
1.9040 ++/*
1.9041 ++A C expression for the number of consecutive hard registers, starting
1.9042 ++at register number REGNO, required to hold a value of mode
1.9043 ++MODE.
1.9044 ++
1.9045 ++On a machine where all registers are exactly one word, a suitable
1.9046 ++definition of this macro is
1.9047 ++
1.9048 ++#define HARD_REGNO_NREGS(REGNO, MODE) \
1.9049 ++ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
1.9050 ++ / UNITS_PER_WORD)
1.9051 ++*/
1.9052 ++#define HARD_REGNO_NREGS(REGNO, MODE) \
1.9053 ++ ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD))
1.9054 ++
1.9055 ++/*
1.9056 ++A C expression that is nonzero if it is permissible to store a value
1.9057 ++of mode MODE in hard register number REGNO (or in several
1.9058 ++registers starting with that one). For a machine where all registers
1.9059 ++are equivalent, a suitable definition is
1.9060 ++
1.9061 ++ #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
1.9062 ++
1.9063 ++You need not include code to check for the numbers of fixed registers,
1.9064 ++because the allocation mechanism considers them to be always occupied.
1.9065 ++
1.9066 ++On some machines, double-precision values must be kept in even/odd
1.9067 ++register pairs. You can implement that by defining this macro to reject
1.9068 ++odd register numbers for such modes.
1.9069 ++
1.9070 ++The minimum requirement for a mode to be OK in a register is that the
1.9071 ++mov[mode] instruction pattern support moves between the
1.9072 ++register and other hard register in the same class and that moving a
1.9073 ++value into the register and back out not alter it.
1.9074 ++
1.9075 ++Since the same instruction used to move word_mode will work for
1.9076 ++all narrower integer modes, it is not necessary on any machine for
1.9077 ++HARD_REGNO_MODE_OK to distinguish between these modes, provided
1.9078 ++you define patterns movhi, etc., to take advantage of this. This
1.9079 ++is useful because of the interaction between HARD_REGNO_MODE_OK
1.9080 ++and MODES_TIEABLE_P; it is very desirable for all integer modes
1.9081 ++to be tieable.
1.9082 ++
1.9083 ++Many machines have special registers for floating point arithmetic.
1.9084 ++Often people assume that floating point machine modes are allowed only
1.9085 ++in floating point registers. This is not true. Any registers that
1.9086 ++can hold integers can safely hold a floating point machine
1.9087 ++mode, whether or not floating arithmetic can be done on it in those
1.9088 ++registers. Integer move instructions can be used to move the values.
1.9089 ++
1.9090 ++On some machines, though, the converse is true: fixed-point machine
1.9091 ++modes may not go in floating registers. This is true if the floating
1.9092 ++registers normalize any value stored in them, because storing a
1.9093 ++non-floating value there would garble it. In this case,
1.9094 ++HARD_REGNO_MODE_OK should reject fixed-point machine modes in
1.9095 ++floating registers. But if the floating registers do not automatically
1.9096 ++normalize, if you can store any bit pattern in one and retrieve it
1.9097 ++unchanged without a trap, then any machine mode may go in a floating
1.9098 ++register, so you can define this macro to say so.
1.9099 ++
1.9100 ++The primary significance of special floating registers is rather that
1.9101 ++they are the registers acceptable in floating point arithmetic
1.9102 ++instructions. However, this is of no concern to
1.9103 ++HARD_REGNO_MODE_OK. You handle it by writing the proper
1.9104 ++constraints for those instructions.
1.9105 ++
1.9106 ++On some machines, the floating registers are especially slow to access,
1.9107 ++so that it is better to store a value in a stack frame than in such a
1.9108 ++register if floating point arithmetic is not being done. As long as the
1.9109 ++floating registers are not in class GENERAL_REGS, they will not
1.9110 ++be used unless some pattern's constraint asks for one.
1.9111 ++*/
1.9112 ++#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
1.9113 ++
1.9114 ++/*
1.9115 ++A C expression that is nonzero if a value of mode
1.9116 ++MODE1 is accessible in mode MODE2 without copying.
1.9117 ++
1.9118 ++If HARD_REGNO_MODE_OK(R, MODE1) and
1.9119 ++HARD_REGNO_MODE_OK(R, MODE2) are always the same for
1.9120 ++any R, then MODES_TIEABLE_P(MODE1, MODE2)
1.9121 ++should be nonzero. If they differ for any R, you should define
1.9122 ++this macro to return zero unless some other mechanism ensures the
1.9123 ++accessibility of the value in a narrower mode.
1.9124 ++
1.9125 ++You should define this macro to return nonzero in as many cases as
1.9126 ++possible since doing so will allow GCC to perform better register
1.9127 ++allocation.
1.9128 ++*/
1.9129 ++#define MODES_TIEABLE_P(MODE1, MODE2) \
1.9130 ++ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
1.9131 ++
1.9132 ++
1.9133 ++
1.9134 ++/******************************************************************************
1.9135 ++ * Register Classes
1.9136 ++ *****************************************************************************/
1.9137 ++
1.9138 ++/*
1.9139 ++An enumeral type that must be defined with all the register class names
1.9140 ++as enumeral values. NO_REGS must be first. ALL_REGS
1.9141 ++must be the last register class, followed by one more enumeral value,
1.9142 ++LIM_REG_CLASSES, which is not a register class but rather
1.9143 ++tells how many classes there are.
1.9144 ++
1.9145 ++Each register class has a number, which is the value of casting
1.9146 ++the class name to type int. The number serves as an index
1.9147 ++in many of the tables described below.
1.9148 ++*/
1.9149 ++enum reg_class
1.9150 ++{
1.9151 ++ NO_REGS,
1.9152 ++ GENERAL_REGS,
1.9153 ++ FP_REGS,
1.9154 ++ ALL_REGS,
1.9155 ++ LIM_REG_CLASSES
1.9156 ++};
1.9157 ++
1.9158 ++/*
1.9159 ++The number of distinct register classes, defined as follows:
1.9160 ++ #define N_REG_CLASSES (int) LIM_REG_CLASSES
1.9161 ++*/
1.9162 ++#define N_REG_CLASSES (int)LIM_REG_CLASSES
1.9163 ++
1.9164 ++/*
1.9165 ++An initializer containing the names of the register classes as C string
1.9166 ++constants. These names are used in writing some of the debugging dumps.
1.9167 ++*/
1.9168 ++#define REG_CLASS_NAMES \
1.9169 ++{ \
1.9170 ++ "NO_REGS", \
1.9171 ++ "GENERAL_REGS", \
1.9172 ++ "FLOATING_POINT_REGS", \
1.9173 ++ "ALL_REGS" \
1.9174 ++}
1.9175 ++
1.9176 ++/*
1.9177 ++An initializer containing the contents of the register classes, as integers
1.9178 ++which are bit masks. The nth integer specifies the contents of class
1.9179 ++n. The way the integer mask is interpreted is that
1.9180 ++register r is in the class if mask & (1 << r) is 1.
1.9181 ++
1.9182 ++When the machine has more than 32 registers, an integer does not suffice.
1.9183 ++Then the integers are replaced by sub-initializers, braced groupings containing
1.9184 ++several integers. Each sub-initializer must be suitable as an initializer
1.9185 ++for the type HARD_REG_SET which is defined in hard-reg-set.h.
1.9186 ++In this situation, the first integer in each sub-initializer corresponds to
1.9187 ++registers 0 through 31, the second integer to registers 32 through 63, and
1.9188 ++so on.
1.9189 ++*/
1.9190 ++#define REG_CLASS_CONTENTS { \
1.9191 ++ {0x00000000}, /* NO_REGS */ \
1.9192 ++ {0x0000FFFF}, /* GENERAL_REGS */ \
1.9193 ++ {0xFFFF0000}, /* FP_REGS */ \
1.9194 ++ {0x7FFFFFFF}, /* ALL_REGS */ \
1.9195 ++}
1.9196 ++
1.9197 ++
1.9198 ++/*
1.9199 ++A C expression whose value is a register class containing hard register
1.9200 ++REGNO. In general there is more than one such class; choose a class
1.9201 ++which is minimal, meaning that no smaller class also contains the
1.9202 ++register.
1.9203 ++*/
1.9204 ++#define REGNO_REG_CLASS(REGNO) ((REGNO < 16) ? GENERAL_REGS : FP_REGS)
1.9205 ++
1.9206 ++/*
1.9207 ++A macro whose definition is the name of the class to which a valid
1.9208 ++base register must belong. A base register is one used in an address
1.9209 ++which is the register value plus a displacement.
1.9210 ++*/
1.9211 ++#define BASE_REG_CLASS GENERAL_REGS
1.9212 ++
1.9213 ++/*
1.9214 ++This is a variation of the BASE_REG_CLASS macro which allows
1.9215 ++the selection of a base register in a mode dependent manner. If
1.9216 ++mode is VOIDmode then it should return the same value as
1.9217 ++BASE_REG_CLASS.
1.9218 ++*/
1.9219 ++#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
1.9220 ++
1.9221 ++/*
1.9222 ++A macro whose definition is the name of the class to which a valid
1.9223 ++index register must belong. An index register is one used in an
1.9224 ++address where its value is either multiplied by a scale factor or
1.9225 ++added to another register (as well as added to a displacement).
1.9226 ++*/
1.9227 ++#define INDEX_REG_CLASS BASE_REG_CLASS
1.9228 ++
1.9229 ++/*
1.9230 ++A C expression which defines the machine-dependent operand constraint
1.9231 ++letters for register classes. If CHAR is such a letter, the
1.9232 ++value should be the register class corresponding to it. Otherwise,
1.9233 ++the value should be NO_REGS. The register letter r,
1.9234 ++corresponding to class GENERAL_REGS, will not be passed
1.9235 ++to this macro; you do not need to handle it.
1.9236 ++*/
1.9237 ++#define REG_CLASS_FROM_LETTER(CHAR) ((CHAR) == 'f' ? FP_REGS : NO_REGS)
1.9238 ++
1.9239 ++
1.9240 ++/* These assume that REGNO is a hard or pseudo reg number.
1.9241 ++ They give nonzero only if REGNO is a hard reg of the suitable class
1.9242 ++ or a pseudo reg currently allocated to a suitable hard reg.
1.9243 ++ Since they use reg_renumber, they are safe only once reg_renumber
1.9244 ++ has been allocated, which happens in local-alloc.c. */
1.9245 ++#define TEST_REGNO(R, TEST, VALUE) \
1.9246 ++ ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
1.9247 ++
1.9248 ++/*
1.9249 ++A C expression which is nonzero if register number num is suitable for use as a base
1.9250 ++register in operand addresses. It may be either a suitable hard register or a pseudo
1.9251 ++register that has been allocated such a hard register.
1.9252 ++*/
1.9253 ++#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
1.9254 ++
1.9255 ++/*
1.9256 ++A C expression which is nonzero if register number NUM is
1.9257 ++suitable for use as an index register in operand addresses. It may be
1.9258 ++either a suitable hard register or a pseudo register that has been
1.9259 ++allocated such a hard register.
1.9260 ++
1.9261 ++The difference between an index register and a base register is that
1.9262 ++the index register may be scaled. If an address involves the sum of
1.9263 ++two registers, neither one of them scaled, then either one may be
1.9264 ++labeled the ``base'' and the other the ``index''; but whichever
1.9265 ++labeling is used must fit the machine's constraints of which registers
1.9266 ++may serve in each capacity. The compiler will try both labelings,
1.9267 ++looking for one that is valid, and will reload one or both registers
1.9268 ++only if neither labeling works.
1.9269 ++*/
1.9270 ++#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
1.9271 ++
1.9272 ++/*
1.9273 ++A C expression that places additional restrictions on the register class
1.9274 ++to use when it is necessary to copy value X into a register in class
1.9275 ++CLASS. The value is a register class; perhaps CLASS, or perhaps
1.9276 ++another, smaller class. On many machines, the following definition is
1.9277 ++safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
1.9278 ++
1.9279 ++Sometimes returning a more restrictive class makes better code. For
1.9280 ++example, on the 68000, when X is an integer constant that is in range
1.9281 ++for a 'moveq' instruction, the value of this macro is always
1.9282 ++DATA_REGS as long as CLASS includes the data registers.
1.9283 ++Requiring a data register guarantees that a 'moveq' will be used.
1.9284 ++
1.9285 ++If X is a const_double, by returning NO_REGS
1.9286 ++you can force X into a memory constant. This is useful on
1.9287 ++certain machines where immediate floating values cannot be loaded into
1.9288 ++certain kinds of registers.
1.9289 ++*/
1.9290 ++#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS
1.9291 ++
1.9292 ++
1.9293 ++
1.9294 ++/*
1.9295 ++A C expression for the maximum number of consecutive registers
1.9296 ++of class CLASS needed to hold a value of mode MODE.
1.9297 ++
1.9298 ++This is closely related to the macro HARD_REGNO_NREGS. In fact,
1.9299 ++the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
1.9300 ++should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
1.9301 ++for all REGNO values in the class CLASS.
1.9302 ++
1.9303 ++This macro helps control the handling of multiple-word values
1.9304 ++in the reload pass.
1.9305 ++*/
1.9306 ++#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
1.9307 ++ (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1.9308 ++
1.9309 ++
1.9310 ++/*
1.9311 ++ Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
1.9312 ++ in order to support constraints with more than one letter.
1.9313 ++ Only two letters are then used for constant constraints,
1.9314 ++ the letter 'K' and the letter 'I'. The constraint starting with
1.9315 ++ these letters must consist of four characters. The character following
1.9316 ++ 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
1.9317 ++ if the constant is zero or sign extended. The last two characters specify
1.9318 ++ the length in bits of the constant. The base constraint letter 'I' means
1.9319 ++ that this is a negated constant, meaning that actually -VAL should be
1.9320 ++ checked to lie within the valid range instead of VAL which is used when
1.9321 ++ 'K' is the base constraint letter.
1.9322 ++
1.9323 ++*/
1.9324 ++
1.9325 ++#define CONSTRAINT_LEN(C, STR) \
1.9326 ++ ( ((C) == 'K' || (C) == 'I') ? 4 : \
1.9327 ++ ((C) == 'R') ? 5 : \
1.9328 ++ ((C) == 'P') ? -1 : \
1.9329 ++ DEFAULT_CONSTRAINT_LEN((C), (STR)) )
1.9330 ++
1.9331 ++#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
1.9332 ++ avr32_const_ok_for_constraint_p(VALUE, C, STR)
1.9333 ++
1.9334 ++/*
1.9335 ++A C expression that defines the machine-dependent operand constraint
1.9336 ++letters that specify particular ranges of const_double values ('G' or 'H').
1.9337 ++
1.9338 ++If C is one of those letters, the expression should check that
1.9339 ++VALUE, an RTX of code const_double, is in the appropriate
1.9340 ++range and return 1 if so, 0 otherwise. If C is not one of those
1.9341 ++letters, the value should be 0 regardless of VALUE.
1.9342 ++
1.9343 ++const_double is used for all floating-point constants and for
1.9344 ++DImode fixed-point constants. A given letter can accept either
1.9345 ++or both kinds of values. It can use GET_MODE to distinguish
1.9346 ++between these kinds.
1.9347 ++*/
1.9348 ++#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
1.9349 ++ ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
1.9350 ++
1.9351 ++/*
1.9352 ++A C expression that defines the optional machine-dependent constraint
1.9353 ++letters that can be used to segregate specific types of operands, usually
1.9354 ++memory references, for the target machine. Any letter that is not
1.9355 ++elsewhere defined and not matched by REG_CLASS_FROM_LETTER
1.9356 ++may be used. Normally this macro will not be defined.
1.9357 ++
1.9358 ++If it is required for a particular target machine, it should return 1
1.9359 ++if VALUE corresponds to the operand type represented by the
1.9360 ++constraint letter C. If C is not defined as an extra
1.9361 ++constraint, the value returned should be 0 regardless of VALUE.
1.9362 ++
1.9363 ++For example, on the ROMP, load instructions cannot have their output
1.9364 ++in r0 if the memory reference contains a symbolic address. Constraint
1.9365 ++letter 'Q' is defined as representing a memory address that does
1.9366 ++not contain a symbolic address. An alternative is specified with
1.9367 ++a 'Q' constraint on the input and 'r' on the output. The next
1.9368 ++alternative specifies 'm' on the input and a register class that
1.9369 ++does not include r0 on the output.
1.9370 ++*/
1.9371 ++#define EXTRA_CONSTRAINT_STR(OP, C, STR) \
1.9372 ++ ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \
1.9373 ++ (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \
1.9374 ++ (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \
1.9375 ++ && avr32_const_ok_for_constraint_p( \
1.9376 ++ INTVAL(XEXP(XEXP(OP, 0), 1)), \
1.9377 ++ (STR)[1], &(STR)[1]))) : \
1.9378 ++ (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \
1.9379 ++ (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
1.9380 ++ (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
1.9381 ++ (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
1.9382 ++ (C) == 'Q' ? avr32_non_rmw_memory_operand(OP, GET_MODE(OP)) : \
1.9383 ++ (C) == 'Y' ? avr32_rmw_memory_operand(OP, GET_MODE(OP)) : \
1.9384 ++ 0)
1.9385 ++
1.9386 ++
1.9387 ++#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
1.9388 ++ ((C) == 'Q') || \
1.9389 ++ ((C) == 'S') || \
1.9390 ++ ((C) == 'Y') || \
1.9391 ++ ((C) == 'Z') )
1.9392 ++
1.9393 ++
1.9394 ++/* Returns nonzero if op is a function SYMBOL_REF which
1.9395 ++ can be called using an rcall instruction */
1.9396 ++#define SYMBOL_REF_RCALL_FUNCTION_P(op) \
1.9397 ++ ( GET_CODE(op) == SYMBOL_REF \
1.9398 ++ && SYMBOL_REF_FUNCTION_P(op) \
1.9399 ++ && SYMBOL_REF_LOCAL_P(op) \
1.9400 ++ && !SYMBOL_REF_EXTERNAL_P(op) \
1.9401 ++ && !TARGET_HAS_ASM_ADDR_PSEUDOS )
1.9402 ++
1.9403 ++/******************************************************************************
1.9404 ++ * Stack Layout and Calling Conventions
1.9405 ++ *****************************************************************************/
1.9406 ++
1.9407 ++/** Basic Stack Layout **/
1.9408 ++
1.9409 ++/*
1.9410 ++Define this macro if pushing a word onto the stack moves the stack
1.9411 ++pointer to a smaller address.
1.9412 ++
1.9413 ++When we say, ``define this macro if ...,'' it means that the
1.9414 ++compiler checks this macro only with #ifdef so the precise
1.9415 ++definition used does not matter.
1.9416 ++*/
1.9417 ++/* pushm decrements SP: *(--SP) <-- Rx */
1.9418 ++#define STACK_GROWS_DOWNWARD
1.9419 ++
1.9420 ++/*
1.9421 ++This macro defines the operation used when something is pushed
1.9422 ++on the stack. In RTL, a push operation will be
1.9423 ++(set (mem (STACK_PUSH_CODE (reg sp))) ...)
1.9424 ++
1.9425 ++The choices are PRE_DEC, POST_DEC, PRE_INC,
1.9426 ++and POST_INC. Which of these is correct depends on
1.9427 ++the stack direction and on whether the stack pointer points
1.9428 ++to the last item on the stack or whether it points to the
1.9429 ++space for the next item on the stack.
1.9430 ++
1.9431 ++The default is PRE_DEC when STACK_GROWS_DOWNWARD is
1.9432 ++defined, which is almost always right, and PRE_INC otherwise,
1.9433 ++which is often wrong.
1.9434 ++*/
1.9435 ++/* pushm: *(--SP) <-- Rx */
1.9436 ++#define STACK_PUSH_CODE PRE_DEC
1.9437 ++
1.9438 ++/* Define this to nonzero if the nominal address of the stack frame
1.9439 ++ is at the high-address end of the local variables;
1.9440 ++ that is, each additional local variable allocated
1.9441 ++ goes at a more negative offset in the frame. */
1.9442 ++#define FRAME_GROWS_DOWNWARD 1
1.9443 ++
1.9444 ++
1.9445 ++/*
1.9446 ++Offset from the frame pointer to the first local variable slot to be allocated.
1.9447 ++
1.9448 ++If FRAME_GROWS_DOWNWARD, find the next slot's offset by
1.9449 ++subtracting the first slot's length from STARTING_FRAME_OFFSET.
1.9450 ++Otherwise, it is found by adding the length of the first slot to the
1.9451 ++value STARTING_FRAME_OFFSET.
1.9452 ++ (i'm not sure if the above is still correct.. had to change it to get
1.9453 ++ rid of an overfull. --mew 2feb93 )
1.9454 ++*/
1.9455 ++#define STARTING_FRAME_OFFSET 0
1.9456 ++
1.9457 ++/*
1.9458 ++Offset from the stack pointer register to the first location at which
1.9459 ++outgoing arguments are placed. If not specified, the default value of
1.9460 ++zero is used. This is the proper value for most machines.
1.9461 ++
1.9462 ++If ARGS_GROW_DOWNWARD, this is the offset to the location above
1.9463 ++the first location at which outgoing arguments are placed.
1.9464 ++*/
1.9465 ++#define STACK_POINTER_OFFSET 0
1.9466 ++
1.9467 ++/*
1.9468 ++Offset from the argument pointer register to the first argument's
1.9469 ++address. On some machines it may depend on the data type of the
1.9470 ++function.
1.9471 ++
1.9472 ++If ARGS_GROW_DOWNWARD, this is the offset to the location above
1.9473 ++the first argument's address.
1.9474 ++*/
1.9475 ++#define FIRST_PARM_OFFSET(FUNDECL) 0
1.9476 ++
1.9477 ++
1.9478 ++/*
1.9479 ++A C expression whose value is RTL representing the address in a stack
1.9480 ++frame where the pointer to the caller's frame is stored. Assume that
1.9481 ++FRAMEADDR is an RTL expression for the address of the stack frame
1.9482 ++itself.
1.9483 ++
1.9484 ++If you don't define this macro, the default is to return the value
1.9485 ++of FRAMEADDR - that is, the stack frame address is also the
1.9486 ++address of the stack word that points to the previous frame.
1.9487 ++*/
1.9488 ++#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
1.9489 ++
1.9490 ++
1.9491 ++/*
1.9492 ++A C expression whose value is RTL representing the value of the return
1.9493 ++address for the frame COUNT steps up from the current frame, after
1.9494 ++the prologue. FRAMEADDR is the frame pointer of the COUNT
1.9495 ++frame, or the frame pointer of the COUNT - 1 frame if
1.9496 ++RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
1.9497 ++
1.9498 ++The value of the expression must always be the correct address when
1.9499 ++COUNT is zero, but may be NULL_RTX if there is no way to
1.9500 ++determine the return address of other frames.
1.9501 ++*/
1.9502 ++#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
1.9503 ++
1.9504 ++
1.9505 ++/*
1.9506 ++A C expression whose value is RTL representing the location of the
1.9507 ++incoming return address at the beginning of any function, before the
1.9508 ++prologue. This RTL is either a REG, indicating that the return
1.9509 ++value is saved in 'REG', or a MEM representing a location in
1.9510 ++the stack.
1.9511 ++
1.9512 ++You only need to define this macro if you want to support call frame
1.9513 ++debugging information like that provided by DWARF 2.
1.9514 ++
1.9515 ++If this RTL is a REG, you should also define
1.9516 ++DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
1.9517 ++*/
1.9518 ++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
1.9519 ++
1.9520 ++
1.9521 ++
1.9522 ++/*
1.9523 ++A C expression whose value is an integer giving the offset, in bytes,
1.9524 ++from the value of the stack pointer register to the top of the stack
1.9525 ++frame at the beginning of any function, before the prologue. The top of
1.9526 ++the frame is defined to be the value of the stack pointer in the
1.9527 ++previous frame, just before the call instruction.
1.9528 ++
1.9529 ++You only need to define this macro if you want to support call frame
1.9530 ++debugging information like that provided by DWARF 2.
1.9531 ++*/
1.9532 ++#define INCOMING_FRAME_SP_OFFSET 0
1.9533 ++
1.9534 ++
1.9535 ++/** Exception Handling Support **/
1.9536 ++
1.9537 ++/* Use setjmp/longjmp for exception handling. */
1.9538 ++#define DWARF2_UNWIND_INFO 0
1.9539 ++#define MUST_USE_SJLJ_EXCEPTIONS 1
1.9540 ++
1.9541 ++/*
1.9542 ++A C expression whose value is the Nth register number used for
1.9543 ++data by exception handlers, or INVALID_REGNUM if fewer than
1.9544 ++N registers are usable.
1.9545 ++
1.9546 ++The exception handling library routines communicate with the exception
1.9547 ++handlers via a set of agreed upon registers. Ideally these registers
1.9548 ++should be call-clobbered; it is possible to use call-saved registers,
1.9549 ++but may negatively impact code size. The target must support at least
1.9550 ++2 data registers, but should define 4 if there are enough free registers.
1.9551 ++
1.9552 ++You must define this macro if you want to support call frame exception
1.9553 ++handling like that provided by DWARF 2.
1.9554 ++*/
1.9555 ++/*
1.9556 ++ Use r9-r11
1.9557 ++*/
1.9558 ++#define EH_RETURN_DATA_REGNO(N) \
1.9559 ++ ((N<3) ? INTERNAL_REGNUM(N+9) : INVALID_REGNUM)
1.9560 ++
1.9561 ++/*
1.9562 ++A C expression whose value is RTL representing a location in which
1.9563 ++to store a stack adjustment to be applied before function return.
1.9564 ++This is used to unwind the stack to an exception handler's call frame.
1.9565 ++It will be assigned zero on code paths that return normally.
1.9566 ++
1.9567 ++Typically this is a call-clobbered hard register that is otherwise
1.9568 ++untouched by the epilogue, but could also be a stack slot.
1.9569 ++
1.9570 ++You must define this macro if you want to support call frame exception
1.9571 ++handling like that provided by DWARF 2.
1.9572 ++*/
1.9573 ++/*
1.9574 ++ Use r8
1.9575 ++*/
1.9576 ++#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8)
1.9577 ++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
1.9578 ++
1.9579 ++/*
1.9580 ++A C expression whose value is RTL representing a location in which
1.9581 ++to store the address of an exception handler to which we should
1.9582 ++return. It will not be assigned on code paths that return normally.
1.9583 ++
1.9584 ++Typically this is the location in the call frame at which the normal
1.9585 ++return address is stored. For targets that return by popping an
1.9586 ++address off the stack, this might be a memory address just below
1.9587 ++the target call frame rather than inside the current call
1.9588 ++frame. EH_RETURN_STACKADJ_RTX will have already been assigned,
1.9589 ++so it may be used to calculate the location of the target call frame.
1.9590 ++
1.9591 ++Some targets have more complex requirements than storing to an
1.9592 ++address calculable during initial code generation. In that case
1.9593 ++the eh_return instruction pattern should be used instead.
1.9594 ++
1.9595 ++If you want to support call frame exception handling, you must
1.9596 ++define either this macro or the eh_return instruction pattern.
1.9597 ++*/
1.9598 ++/*
1.9599 ++ We define the eh_return instruction pattern, so this isn't needed.
1.9600 ++*/
1.9601 ++/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
1.9602 ++
1.9603 ++/*
1.9604 ++ This macro chooses the encoding of pointers embedded in the
1.9605 ++ exception handling sections. If at all possible, this should be
1.9606 ++ defined such that the exception handling section will not require
1.9607 ++ dynamic relocations, and so may be read-only.
1.9608 ++
1.9609 ++ code is 0 for data, 1 for code labels, 2 for function
1.9610 ++ pointers. global is true if the symbol may be affected by dynamic
1.9611 ++ relocations. The macro should return a combination of the DW_EH_PE_*
1.9612 ++ defines as found in dwarf2.h.
1.9613 ++
1.9614 ++ If this macro is not defined, pointers will not be encoded but
1.9615 ++ represented directly.
1.9616 ++*/
1.9617 ++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
1.9618 ++ ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
1.9619 ++ | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
1.9620 ++ | DW_EH_PE_sdata4)
1.9621 ++
1.9622 ++/* ToDo: The rest of this subsection */
1.9623 ++
1.9624 ++/** Specifying How Stack Checking is Done **/
1.9625 ++/* ToDo: All in this subsection */
1.9626 ++
1.9627 ++/** Registers That Address the Stack Frame **/
1.9628 ++
1.9629 ++/*
1.9630 ++The register number of the stack pointer register, which must also be a
1.9631 ++fixed register according to FIXED_REGISTERS. On most machines,
1.9632 ++the hardware determines which register this is.
1.9633 ++*/
1.9634 ++/* Using r13 as stack pointer. */
1.9635 ++#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
1.9636 ++
1.9637 ++/*
1.9638 ++The register number of the frame pointer register, which is used to
1.9639 ++access automatic variables in the stack frame. On some machines, the
1.9640 ++hardware determines which register this is. On other machines, you can
1.9641 ++choose any register you wish for this purpose.
1.9642 ++*/
1.9643 ++/* Use r7 */
1.9644 ++#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
1.9645 ++
1.9646 ++
1.9647 ++
1.9648 ++/*
1.9649 ++The register number of the arg pointer register, which is used to access
1.9650 ++the function's argument list. On some machines, this is the same as the
1.9651 ++frame pointer register. On some machines, the hardware determines which
1.9652 ++register this is. On other machines, you can choose any register you
1.9653 ++wish for this purpose. If this is not the same register as the frame
1.9654 ++pointer register, then you must mark it as a fixed register according to
1.9655 ++FIXED_REGISTERS, or arrange to be able to eliminate it (see Section
1.9656 ++10.10.5 [Elimination], page 224).
1.9657 ++*/
1.9658 ++/* Using r4 */
1.9659 ++#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
1.9660 ++
1.9661 ++
1.9662 ++/*
1.9663 ++Register numbers used for passing a function's static chain pointer. If
1.9664 ++register windows are used, the register number as seen by the called
1.9665 ++function is STATIC_CHAIN_INCOMING_REGNUM, while the register
1.9666 ++number as seen by the calling function is STATIC_CHAIN_REGNUM. If
1.9667 ++these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
1.9668 ++not be defined.
1.9669 ++
1.9670 ++The static chain register need not be a fixed register.
1.9671 ++
1.9672 ++If the static chain is passed in memory, these macros should not be
1.9673 ++defined; instead, the next two macros should be defined.
1.9674 ++*/
1.9675 ++/* Using r0 */
1.9676 ++#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
1.9677 ++
1.9678 ++
1.9679 ++/** Eliminating Frame Pointer and Arg Pointer **/
1.9680 ++
1.9681 ++/*
1.9682 ++A C expression which is nonzero if a function must have and use a frame
1.9683 ++pointer. This expression is evaluated in the reload pass. If its value is
1.9684 ++nonzero the function will have a frame pointer.
1.9685 ++
1.9686 ++The expression can in principle examine the current function and decide
1.9687 ++according to the facts, but on most machines the constant 0 or the
1.9688 ++constant 1 suffices. Use 0 when the machine allows code to be generated
1.9689 ++with no frame pointer, and doing so saves some time or space. Use 1
1.9690 ++when there is no possible advantage to avoiding a frame pointer.
1.9691 ++
1.9692 ++In certain cases, the compiler does not know how to produce valid code
1.9693 ++without a frame pointer. The compiler recognizes those cases and
1.9694 ++automatically gives the function a frame pointer regardless of what
1.9695 ++FRAME_POINTER_REQUIRED says. You don't need to worry about
1.9696 ++them.
1.9697 ++
1.9698 ++In a function that does not require a frame pointer, the frame pointer
1.9699 ++register can be allocated for ordinary usage, unless you mark it as a
1.9700 ++fixed register. See FIXED_REGISTERS for more information.
1.9701 ++*/
1.9702 ++/* We need the frame pointer when compiling for profiling */
1.9703 ++#define FRAME_POINTER_REQUIRED (current_function_profile)
1.9704 ++
1.9705 ++/*
1.9706 ++A C statement to store in the variable DEPTH_VAR the difference
1.9707 ++between the frame pointer and the stack pointer values immediately after
1.9708 ++the function prologue. The value would be computed from information
1.9709 ++such as the result of get_frame_size () and the tables of
1.9710 ++registers regs_ever_live and call_used_regs.
1.9711 ++
1.9712 ++If ELIMINABLE_REGS is defined, this macro will be not be used and
1.9713 ++need not be defined. Otherwise, it must be defined even if
1.9714 ++FRAME_POINTER_REQUIRED is defined to always be true; in that
1.9715 ++case, you may set DEPTH_VAR to anything.
1.9716 ++*/
1.9717 ++#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
1.9718 ++
1.9719 ++/*
1.9720 ++If defined, this macro specifies a table of register pairs used to
1.9721 ++eliminate unneeded registers that point into the stack frame. If it is not
1.9722 ++defined, the only elimination attempted by the compiler is to replace
1.9723 ++references to the frame pointer with references to the stack pointer.
1.9724 ++
1.9725 ++The definition of this macro is a list of structure initializations, each
1.9726 ++of which specifies an original and replacement register.
1.9727 ++
1.9728 ++On some machines, the position of the argument pointer is not known until
1.9729 ++the compilation is completed. In such a case, a separate hard register
1.9730 ++must be used for the argument pointer. This register can be eliminated by
1.9731 ++replacing it with either the frame pointer or the argument pointer,
1.9732 ++depending on whether or not the frame pointer has been eliminated.
1.9733 ++
1.9734 ++In this case, you might specify:
1.9735 ++ #define ELIMINABLE_REGS \
1.9736 ++ {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
1.9737 ++ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
1.9738 ++ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
1.9739 ++
1.9740 ++Note that the elimination of the argument pointer with the stack pointer is
1.9741 ++specified first since that is the preferred elimination.
1.9742 ++*/
1.9743 ++#define ELIMINABLE_REGS \
1.9744 ++{ \
1.9745 ++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
1.9746 ++ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
1.9747 ++ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \
1.9748 ++}
1.9749 ++
1.9750 ++/*
1.9751 ++A C expression that returns nonzero if the compiler is allowed to try
1.9752 ++to replace register number FROM with register number
1.9753 ++TO. This macro need only be defined if ELIMINABLE_REGS
1.9754 ++is defined, and will usually be the constant 1, since most of the cases
1.9755 ++preventing register elimination are things that the compiler already
1.9756 ++knows about.
1.9757 ++*/
1.9758 ++#define CAN_ELIMINATE(FROM, TO) 1
1.9759 ++
1.9760 ++/*
1.9761 ++This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It
1.9762 ++specifies the initial difference between the specified pair of
1.9763 ++registers. This macro must be defined if ELIMINABLE_REGS is
1.9764 ++defined.
1.9765 ++*/
1.9766 ++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
1.9767 ++ ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
1.9768 ++
1.9769 ++/** Passing Function Arguments on the Stack **/
1.9770 ++
1.9771 ++
1.9772 ++/*
1.9773 ++A C expression. If nonzero, push insns will be used to pass
1.9774 ++outgoing arguments.
1.9775 ++If the target machine does not have a push instruction, set it to zero.
1.9776 ++That directs GCC to use an alternate strategy: to
1.9777 ++allocate the entire argument block and then store the arguments into
1.9778 ++it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
1.9779 ++*/
1.9780 ++#define PUSH_ARGS 1
1.9781 ++
1.9782 ++
1.9783 ++/*
1.9784 ++A C expression that is the number of bytes actually pushed onto the
1.9785 ++stack when an instruction attempts to push NPUSHED bytes.
1.9786 ++
1.9787 ++On some machines, the definition
1.9788 ++
1.9789 ++ #define PUSH_ROUNDING(BYTES) (BYTES)
1.9790 ++
1.9791 ++will suffice. But on other machines, instructions that appear
1.9792 ++to push one byte actually push two bytes in an attempt to maintain
1.9793 ++alignment. Then the definition should be
1.9794 ++
1.9795 ++ #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
1.9796 ++*/
1.9797 ++/* Push 4 bytes at the time. */
1.9798 ++#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
1.9799 ++
1.9800 ++/*
1.9801 ++A C expression. If nonzero, the maximum amount of space required for
1.9802 ++outgoing arguments will be computed and placed into the variable
1.9803 ++current_function_outgoing_args_size. No space will be pushed
1.9804 ++onto the stack for each call; instead, the function prologue should
1.9805 ++increase the stack frame size by this amount.
1.9806 ++
1.9807 ++Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
1.9808 ++*/
1.9809 ++#define ACCUMULATE_OUTGOING_ARGS 0
1.9810 ++
1.9811 ++
1.9812 ++
1.9813 ++
1.9814 ++/*
1.9815 ++A C expression that should indicate the number of bytes of its own
1.9816 ++arguments that a function pops on returning, or 0 if the
1.9817 ++function pops no arguments and the caller must therefore pop them all
1.9818 ++after the function returns.
1.9819 ++
1.9820 ++FUNDECL is a C variable whose value is a tree node that describes
1.9821 ++the function in question. Normally it is a node of type
1.9822 ++FUNCTION_DECL that describes the declaration of the function.
1.9823 ++From this you can obtain the DECL_ATTRIBUTES of the function.
1.9824 ++
1.9825 ++FUNTYPE is a C variable whose value is a tree node that
1.9826 ++describes the function in question. Normally it is a node of type
1.9827 ++FUNCTION_TYPE that describes the data type of the function.
1.9828 ++From this it is possible to obtain the data types of the value and
1.9829 ++arguments (if known).
1.9830 ++
1.9831 ++When a call to a library function is being considered, FUNDECL
1.9832 ++will contain an identifier node for the library function. Thus, if
1.9833 ++you need to distinguish among various library functions, you can do so
1.9834 ++by their names. Note that ``library function'' in this context means
1.9835 ++a function used to perform arithmetic, whose name is known specially
1.9836 ++in the compiler and was not mentioned in the C code being compiled.
1.9837 ++
1.9838 ++STACK_SIZE is the number of bytes of arguments passed on the
1.9839 ++stack. If a variable number of bytes is passed, it is zero, and
1.9840 ++argument popping will always be the responsibility of the calling function.
1.9841 ++
1.9842 ++On the VAX, all functions always pop their arguments, so the definition
1.9843 ++of this macro is STACK_SIZE. On the 68000, using the standard
1.9844 ++calling convention, no functions pop their arguments, so the value of
1.9845 ++the macro is always 0 in this case. But an alternative calling
1.9846 ++convention is available in which functions that take a fixed number of
1.9847 ++arguments pop them but other functions (such as printf) pop
1.9848 ++nothing (the caller pops all). When this convention is in use,
1.9849 ++FUNTYPE is examined to determine whether a function takes a fixed
1.9850 ++number of arguments.
1.9851 ++*/
1.9852 ++#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
1.9853 ++
1.9854 ++
1.9855 ++/* Return true if this function can use a single return instruction. */
1.9856 ++#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
1.9857 ++
1.9858 ++/*
1.9859 ++A C expression that should indicate the number of bytes a call sequence
1.9860 ++pops off the stack. It is added to the value of RETURN_POPS_ARGS
1.9861 ++when compiling a function call.
1.9862 ++
1.9863 ++CUM is the variable in which all arguments to the called function
1.9864 ++have been accumulated.
1.9865 ++
1.9866 ++On certain architectures, such as the SH5, a call trampoline is used
1.9867 ++that pops certain registers off the stack, depending on the arguments
1.9868 ++that have been passed to the function. Since this is a property of the
1.9869 ++call site, not of the called function, RETURN_POPS_ARGS is not
1.9870 ++appropriate.
1.9871 ++*/
1.9872 ++#define CALL_POPS_ARGS(CUM) 0
1.9873 ++
1.9874 ++/* Passing Arguments in Registers */
1.9875 ++
1.9876 ++/*
1.9877 ++A C expression that controls whether a function argument is passed
1.9878 ++in a register, and which register.
1.9879 ++
1.9880 ++The arguments are CUM, which summarizes all the previous
1.9881 ++arguments; MODE, the machine mode of the argument; TYPE,
1.9882 ++the data type of the argument as a tree node or 0 if that is not known
1.9883 ++(which happens for C support library functions); and NAMED,
1.9884 ++which is 1 for an ordinary argument and 0 for nameless arguments that
1.9885 ++correspond to '...' in the called function's prototype.
1.9886 ++TYPE can be an incomplete type if a syntax error has previously
1.9887 ++occurred.
1.9888 ++
1.9889 ++The value of the expression is usually either a reg RTX for the
1.9890 ++hard register in which to pass the argument, or zero to pass the
1.9891 ++argument on the stack.
1.9892 ++
1.9893 ++For machines like the VAX and 68000, where normally all arguments are
1.9894 ++pushed, zero suffices as a definition.
1.9895 ++
1.9896 ++The value of the expression can also be a parallel RTX. This is
1.9897 ++used when an argument is passed in multiple locations. The mode
1.9898 ++of the parallel should be the mode of the entire argument. The
1.9899 ++parallel holds any number of expr_list pairs; each one
1.9900 ++describes where part of the argument is passed. In each
1.9901 ++expr_list the first operand must be a reg RTX for the hard
1.9902 ++register in which to pass this part of the argument, and the mode of the
1.9903 ++register RTX indicates how large this part of the argument is. The
1.9904 ++second operand of the expr_list is a const_int which gives
1.9905 ++the offset in bytes into the entire argument of where this part starts.
1.9906 ++As a special exception the first expr_list in the parallel
1.9907 ++RTX may have a first operand of zero. This indicates that the entire
1.9908 ++argument is also stored on the stack.
1.9909 ++
1.9910 ++The last time this macro is called, it is called with MODE == VOIDmode,
1.9911 ++and its result is passed to the call or call_value
1.9912 ++pattern as operands 2 and 3 respectively.
1.9913 ++
1.9914 ++The usual way to make the ISO library 'stdarg.h' work on a machine
1.9915 ++where some arguments are usually passed in registers, is to cause
1.9916 ++nameless arguments to be passed on the stack instead. This is done
1.9917 ++by making FUNCTION_ARG return 0 whenever NAMED is 0.
1.9918 ++
1.9919 ++You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
1.9920 ++in the definition of this macro to determine if this argument is of a
1.9921 ++type that must be passed in the stack. If REG_PARM_STACK_SPACE
1.9922 ++is not defined and FUNCTION_ARG returns nonzero for such an
1.9923 ++argument, the compiler will abort. If REG_PARM_STACK_SPACE is
1.9924 ++defined, the argument will be computed in the stack and then loaded into
1.9925 ++a register. */
1.9926 ++
1.9927 ++#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
1.9928 ++ avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
1.9929 ++
1.9930 ++
1.9931 ++
1.9932 ++
1.9933 ++/*
1.9934 ++A C type for declaring a variable that is used as the first argument of
1.9935 ++FUNCTION_ARG and other related values. For some target machines,
1.9936 ++the type int suffices and can hold the number of bytes of
1.9937 ++argument so far.
1.9938 ++
1.9939 ++There is no need to record in CUMULATIVE_ARGS anything about the
1.9940 ++arguments that have been passed on the stack. The compiler has other
1.9941 ++variables to keep track of that. For target machines on which all
1.9942 ++arguments are passed on the stack, there is no need to store anything in
1.9943 ++CUMULATIVE_ARGS; however, the data structure must exist and
1.9944 ++should not be empty, so use int.
1.9945 ++*/
1.9946 ++typedef struct avr32_args
1.9947 ++{
1.9948 ++ /* Index representing the argument register the current function argument
1.9949 ++ will occupy */
1.9950 ++ int index;
1.9951 ++ /* A mask with bits representing the argument registers: if a bit is set
1.9952 ++ then this register is used for an argument */
1.9953 ++ int used_index;
1.9954 ++ /* TRUE if this function has anonymous arguments */
1.9955 ++ int uses_anonymous_args;
1.9956 ++ /* The size in bytes of the named arguments pushed on the stack */
1.9957 ++ int stack_pushed_args_size;
1.9958 ++ /* Set to true if this function needs a Return Value Pointer */
1.9959 ++ int use_rvp;
1.9960 ++
1.9961 ++} CUMULATIVE_ARGS;
1.9962 ++
1.9963 ++
1.9964 ++#define FIRST_CUM_REG_INDEX 0
1.9965 ++#define LAST_CUM_REG_INDEX 4
1.9966 ++#define GET_REG_INDEX(CUM) ((CUM)->index)
1.9967 ++#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX));
1.9968 ++#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
1.9969 ++#define SET_USED_INDEX(CUM, INDEX) \
1.9970 ++ do \
1.9971 ++ { \
1.9972 ++ if (INDEX >= 0) \
1.9973 ++ (CUM)->used_index |= (1 << (INDEX)); \
1.9974 ++ } \
1.9975 ++ while (0)
1.9976 ++#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
1.9977 ++
1.9978 ++
1.9979 ++/*
1.9980 ++ A C statement (sans semicolon) for initializing the variable cum for the
1.9981 ++ state at the beginning of the argument list. The variable has type
1.9982 ++ CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
1.9983 ++ the function which will receive the args, or 0 if the args are to a compiler
1.9984 ++ support library function. For direct calls that are not libcalls, FNDECL
1.9985 ++ contain the declaration node of the function. FNDECL is also set when
1.9986 ++ INIT_CUMULATIVE_ARGS is used to find arguments for the function being
1.9987 ++ compiled. N_NAMED_ARGS is set to the number of named arguments, including a
1.9988 ++ structure return address if it is passed as a parameter, when making a call.
1.9989 ++ When processing incoming arguments, N_NAMED_ARGS is set to -1.
1.9990 ++
1.9991 ++ When processing a call to a compiler support library function, LIBNAME
1.9992 ++ identifies which one. It is a symbol_ref rtx which contains the name of the
1.9993 ++ function, as a string. LIBNAME is 0 when an ordinary C function call is
1.9994 ++ being processed. Thus, each time this macro is called, either LIBNAME or
1.9995 ++ FNTYPE is nonzero, but never both of them at once.
1.9996 ++*/
1.9997 ++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
1.9998 ++ avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
1.9999 ++
1.10000 ++
1.10001 ++/*
1.10002 ++A C statement (sans semicolon) to update the summarizer variable
1.10003 ++CUM to advance past an argument in the argument list. The
1.10004 ++values MODE, TYPE and NAMED describe that argument.
1.10005 ++Once this is done, the variable CUM is suitable for analyzing
1.10006 ++the following argument with FUNCTION_ARG, etc.
1.10007 ++
1.10008 ++This macro need not do anything if the argument in question was passed
1.10009 ++on the stack. The compiler knows how to track the amount of stack space
1.10010 ++used for arguments without any special help.
1.10011 ++*/
1.10012 ++#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
1.10013 ++ avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
1.10014 ++
1.10015 ++/*
1.10016 ++If defined, a C expression which determines whether, and in which direction,
1.10017 ++to pad out an argument with extra space. The value should be of type
1.10018 ++enum direction: either 'upward' to pad above the argument,
1.10019 ++'downward' to pad below, or 'none' to inhibit padding.
1.10020 ++
1.10021 ++The amount of padding is always just enough to reach the next
1.10022 ++multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
1.10023 ++it.
1.10024 ++
1.10025 ++This macro has a default definition which is right for most systems.
1.10026 ++For little-endian machines, the default is to pad upward. For
1.10027 ++big-endian machines, the default is to pad downward for an argument of
1.10028 ++constant size shorter than an int, and upward otherwise.
1.10029 ++*/
1.10030 ++#define FUNCTION_ARG_PADDING(MODE, TYPE) \
1.10031 ++ avr32_function_arg_padding(MODE, TYPE)
1.10032 ++
1.10033 ++/*
1.10034 ++ Specify padding for the last element of a block move between registers
1.10035 ++ and memory. First is nonzero if this is the only element. Defining
1.10036 ++ this macro allows better control of register function parameters on
1.10037 ++ big-endian machines, without using PARALLEL rtl. In particular,
1.10038 ++ MUST_PASS_IN_STACK need not test padding and mode of types in registers,
1.10039 ++ as there is no longer a "wrong" part of a register; For example, a three
1.10040 ++ byte aggregate may be passed in the high part of a register if so required.
1.10041 ++*/
1.10042 ++#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
1.10043 ++ avr32_function_arg_padding(MODE, TYPE)
1.10044 ++
1.10045 ++/*
1.10046 ++If defined, a C expression which determines whether the default
1.10047 ++implementation of va_arg will attempt to pad down before reading the
1.10048 ++next argument, if that argument is smaller than its aligned space as
1.10049 ++controlled by PARM_BOUNDARY. If this macro is not defined, all such
1.10050 ++arguments are padded down if BYTES_BIG_ENDIAN is true.
1.10051 ++*/
1.10052 ++#define PAD_VARARGS_DOWN \
1.10053 ++ (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
1.10054 ++
1.10055 ++
1.10056 ++/*
1.10057 ++A C expression that is nonzero if REGNO is the number of a hard
1.10058 ++register in which function arguments are sometimes passed. This does
1.10059 ++not include implicit arguments such as the static chain and
1.10060 ++the structure-value address. On many machines, no registers can be
1.10061 ++used for this purpose since all function arguments are pushed on the
1.10062 ++stack.
1.10063 ++*/
1.10064 ++/*
1.10065 ++ Use r8 - r12 for function arguments.
1.10066 ++*/
1.10067 ++#define FUNCTION_ARG_REGNO_P(REGNO) \
1.10068 ++ (REGNO >= 3 && REGNO <= 7)
1.10069 ++
1.10070 ++/* Number of registers used for passing function arguments */
1.10071 ++#define NUM_ARG_REGS 5
1.10072 ++
1.10073 ++/*
1.10074 ++If defined, the order in which arguments are loaded into their
1.10075 ++respective argument registers is reversed so that the last
1.10076 ++argument is loaded first. This macro only affects arguments
1.10077 ++passed in registers.
1.10078 ++*/
1.10079 ++/* #define LOAD_ARGS_REVERSED */
1.10080 ++
1.10081 ++/** How Scalar Function Values Are Returned **/
1.10082 ++
1.10083 ++/* AVR32 is using r12 as return register. */
1.10084 ++#define RET_REGISTER (15 - 12)
1.10085 ++
1.10086 ++
1.10087 ++/*
1.10088 ++A C expression to create an RTX representing the place where a library
1.10089 ++function returns a value of mode MODE. If the precise function
1.10090 ++being called is known, FUNC is a tree node
1.10091 ++(FUNCTION_DECL) for it; otherwise, func is a null
1.10092 ++pointer. This makes it possible to use a different value-returning
1.10093 ++convention for specific functions when all their calls are
1.10094 ++known.
1.10095 ++
1.10096 ++Note that "library function" in this context means a compiler
1.10097 ++support routine, used to perform arithmetic, whose name is known
1.10098 ++specially by the compiler and was not mentioned in the C code being
1.10099 ++compiled.
1.10100 ++
1.10101 ++The definition of LIBRARY_VALUE need not be concerned with aggregate
1.10102 ++data types, because none of the library functions returns such types.
1.10103 ++*/
1.10104 ++#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
1.10105 ++
1.10106 ++/*
1.10107 ++A C expression that is nonzero if REGNO is the number of a hard
1.10108 ++register in which the values of called function may come back.
1.10109 ++
1.10110 ++A register whose use for returning values is limited to serving as the
1.10111 ++second of a pair (for a value of type double, say) need not be
1.10112 ++recognized by this macro. So for most machines, this definition
1.10113 ++suffices:
1.10114 ++ #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
1.10115 ++
1.10116 ++If the machine has register windows, so that the caller and the called
1.10117 ++function use different registers for the return value, this macro
1.10118 ++should recognize only the caller's register numbers.
1.10119 ++*/
1.10120 ++/*
1.10121 ++ When returning a value of mode DImode, r11:r10 is used, else r12 is used.
1.10122 ++*/
1.10123 ++#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
1.10124 ++ || (REGNO) == INTERNAL_REGNUM(11))
1.10125 ++
1.10126 ++
1.10127 ++/** How Large Values Are Returned **/
1.10128 ++
1.10129 ++
1.10130 ++/*
1.10131 ++Define this macro to be 1 if all structure and union return values must be
1.10132 ++in memory. Since this results in slower code, this should be defined
1.10133 ++only if needed for compatibility with other compilers or with an ABI.
1.10134 ++If you define this macro to be 0, then the conventions used for structure
1.10135 ++and union return values are decided by the RETURN_IN_MEMORY macro.
1.10136 ++
1.10137 ++If not defined, this defaults to the value 1.
1.10138 ++*/
1.10139 ++#define DEFAULT_PCC_STRUCT_RETURN 0
1.10140 ++
1.10141 ++
1.10142 ++
1.10143 ++
1.10144 ++/** Generating Code for Profiling **/
1.10145 ++
1.10146 ++/*
1.10147 ++A C statement or compound statement to output to FILE some
1.10148 ++assembler code to call the profiling subroutine mcount.
1.10149 ++
1.10150 ++The details of how mcount expects to be called are determined by
1.10151 ++your operating system environment, not by GCC. To figure them out,
1.10152 ++compile a small program for profiling using the system's installed C
1.10153 ++compiler and look at the assembler code that results.
1.10154 ++
1.10155 ++Older implementations of mcount expect the address of a counter
1.10156 ++variable to be loaded into some register. The name of this variable is
1.10157 ++'LP' followed by the number LABELNO, so you would generate
1.10158 ++the name using 'LP%d' in a fprintf.
1.10159 ++*/
1.10160 ++/* ToDo: fixme */
1.10161 ++#ifndef FUNCTION_PROFILER
1.10162 ++#define FUNCTION_PROFILER(FILE, LABELNO) \
1.10163 ++ fprintf((FILE), "/* profiler %d */", (LABELNO))
1.10164 ++#endif
1.10165 ++
1.10166 ++
1.10167 ++/*****************************************************************************
1.10168 ++ * Trampolines for Nested Functions *
1.10169 ++ *****************************************************************************/
1.10170 ++
1.10171 ++/*
1.10172 ++A C statement to output, on the stream FILE, assembler code for a
1.10173 ++block of data that contains the constant parts of a trampoline. This
1.10174 ++code should not include a label - the label is taken care of
1.10175 ++automatically.
1.10176 ++
1.10177 ++If you do not define this macro, it means no template is needed
1.10178 ++for the target. Do not define this macro on systems where the block move
1.10179 ++code to copy the trampoline into place would be larger than the code
1.10180 ++to generate it on the spot.
1.10181 ++*/
1.10182 ++/* ToDo: correct? */
1.10183 ++#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE);
1.10184 ++
1.10185 ++
1.10186 ++/*
1.10187 ++A C expression for the size in bytes of the trampoline, as an integer.
1.10188 ++*/
1.10189 ++/* ToDo: fixme */
1.10190 ++#define TRAMPOLINE_SIZE 0x0C
1.10191 ++
1.10192 ++/*
1.10193 ++Alignment required for trampolines, in bits.
1.10194 ++
1.10195 ++If you don't define this macro, the value of BIGGEST_ALIGNMENT
1.10196 ++is used for aligning trampolines.
1.10197 ++*/
1.10198 ++#define TRAMPOLINE_ALIGNMENT 16
1.10199 ++
1.10200 ++/*
1.10201 ++A C statement to initialize the variable parts of a trampoline.
1.10202 ++ADDR is an RTX for the address of the trampoline; FNADDR is
1.10203 ++an RTX for the address of the nested function; STATIC_CHAIN is an
1.10204 ++RTX for the static chain value that should be passed to the function
1.10205 ++when it is called.
1.10206 ++*/
1.10207 ++#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
1.10208 ++ avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
1.10209 ++
1.10210 ++
1.10211 ++/******************************************************************************
1.10212 ++ * Implicit Calls to Library Routines
1.10213 ++ *****************************************************************************/
1.10214 ++
1.10215 ++/* Tail calling. */
1.10216 ++
1.10217 ++/* A C expression that evaluates to true if it is ok to perform a sibling
1.10218 ++ call to DECL. */
1.10219 ++#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
1.10220 ++
1.10221 ++#define OVERRIDE_OPTIONS avr32_override_options ()
1.10222 ++
1.10223 ++#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE)
1.10224 ++
1.10225 ++/******************************************************************************
1.10226 ++ * Addressing Modes
1.10227 ++ *****************************************************************************/
1.10228 ++
1.10229 ++/*
1.10230 ++A C expression that is nonzero if the machine supports pre-increment,
1.10231 ++pre-decrement, post-increment, or post-decrement addressing respectively.
1.10232 ++*/
1.10233 ++/*
1.10234 ++ AVR32 supports Rp++ and --Rp
1.10235 ++*/
1.10236 ++#define HAVE_PRE_INCREMENT 0
1.10237 ++#define HAVE_PRE_DECREMENT 1
1.10238 ++#define HAVE_POST_INCREMENT 1
1.10239 ++#define HAVE_POST_DECREMENT 0
1.10240 ++
1.10241 ++/*
1.10242 ++A C expression that is nonzero if the machine supports pre- or
1.10243 ++post-address side-effect generation involving constants other than
1.10244 ++the size of the memory operand.
1.10245 ++*/
1.10246 ++#define HAVE_PRE_MODIFY_DISP 0
1.10247 ++#define HAVE_POST_MODIFY_DISP 0
1.10248 ++
1.10249 ++/*
1.10250 ++A C expression that is nonzero if the machine supports pre- or
1.10251 ++post-address side-effect generation involving a register displacement.
1.10252 ++*/
1.10253 ++#define HAVE_PRE_MODIFY_REG 0
1.10254 ++#define HAVE_POST_MODIFY_REG 0
1.10255 ++
1.10256 ++/*
1.10257 ++A C expression that is 1 if the RTX X is a constant which
1.10258 ++is a valid address. On most machines, this can be defined as
1.10259 ++CONSTANT_P (X), but a few machines are more restrictive
1.10260 ++in which constant addresses are supported.
1.10261 ++
1.10262 ++CONSTANT_P accepts integer-values expressions whose values are
1.10263 ++not explicitly known, such as symbol_ref, label_ref, and
1.10264 ++high expressions and const arithmetic expressions, in
1.10265 ++addition to const_int and const_double expressions.
1.10266 ++*/
1.10267 ++#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
1.10268 ++
1.10269 ++/*
1.10270 ++A number, the maximum number of registers that can appear in a valid
1.10271 ++memory address. Note that it is up to you to specify a value equal to
1.10272 ++the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
1.10273 ++accept.
1.10274 ++*/
1.10275 ++#define MAX_REGS_PER_ADDRESS 2
1.10276 ++
1.10277 ++/*
1.10278 ++A C compound statement with a conditional goto LABEL;
1.10279 ++executed if X (an RTX) is a legitimate memory address on the
1.10280 ++target machine for a memory operand of mode MODE.
1.10281 ++
1.10282 ++It usually pays to define several simpler macros to serve as
1.10283 ++subroutines for this one. Otherwise it may be too complicated to
1.10284 ++understand.
1.10285 ++
1.10286 ++This macro must exist in two variants: a strict variant and a
1.10287 ++non-strict one. The strict variant is used in the reload pass. It
1.10288 ++must be defined so that any pseudo-register that has not been
1.10289 ++allocated a hard register is considered a memory reference. In
1.10290 ++contexts where some kind of register is required, a pseudo-register
1.10291 ++with no hard register must be rejected.
1.10292 ++
1.10293 ++The non-strict variant is used in other passes. It must be defined to
1.10294 ++accept all pseudo-registers in every context where some kind of
1.10295 ++register is required.
1.10296 ++
1.10297 ++Compiler source files that want to use the strict variant of this
1.10298 ++macro define the macro REG_OK_STRICT. You should use an
1.10299 ++#ifdef REG_OK_STRICT conditional to define the strict variant
1.10300 ++in that case and the non-strict variant otherwise.
1.10301 ++
1.10302 ++Subroutines to check for acceptable registers for various purposes (one
1.10303 ++for base registers, one for index registers, and so on) are typically
1.10304 ++among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
1.10305 ++Then only these subroutine macros need have two variants; the higher
1.10306 ++levels of macros may be the same whether strict or not.
1.10307 ++
1.10308 ++Normally, constant addresses which are the sum of a symbol_ref
1.10309 ++and an integer are stored inside a const RTX to mark them as
1.10310 ++constant. Therefore, there is no need to recognize such sums
1.10311 ++specifically as legitimate addresses. Normally you would simply
1.10312 ++recognize any const as legitimate.
1.10313 ++
1.10314 ++Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
1.10315 ++sums that are not marked with const. It assumes that a naked
1.10316 ++plus indicates indexing. If so, then you must reject such
1.10317 ++naked constant sums as illegitimate addresses, so that none of them will
1.10318 ++be given to PRINT_OPERAND_ADDRESS.
1.10319 ++
1.10320 ++On some machines, whether a symbolic address is legitimate depends on
1.10321 ++the section that the address refers to. On these machines, define the
1.10322 ++macro ENCODE_SECTION_INFO to store the information into the
1.10323 ++symbol_ref, and then check for it here. When you see a
1.10324 ++const, you will have to look inside it to find the
1.10325 ++symbol_ref in order to determine the section.
1.10326 ++
1.10327 ++The best way to modify the name string is by adding text to the
1.10328 ++beginning, with suitable punctuation to prevent any ambiguity. Allocate
1.10329 ++the new name in saveable_obstack. You will have to modify
1.10330 ++ASM_OUTPUT_LABELREF to remove and decode the added text and
1.10331 ++output the name accordingly, and define STRIP_NAME_ENCODING to
1.10332 ++access the original name string.
1.10333 ++
1.10334 ++You can check the information stored here into the symbol_ref in
1.10335 ++the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
1.10336 ++PRINT_OPERAND_ADDRESS.
1.10337 ++*/
1.10338 ++#ifdef REG_OK_STRICT
1.10339 ++# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
1.10340 ++ do \
1.10341 ++ { \
1.10342 ++ if (avr32_legitimate_address(MODE, X, 1)) \
1.10343 ++ goto LABEL; \
1.10344 ++ } \
1.10345 ++ while (0)
1.10346 ++#else
1.10347 ++# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
1.10348 ++ do \
1.10349 ++ { \
1.10350 ++ if (avr32_legitimate_address(MODE, X, 0)) \
1.10351 ++ goto LABEL; \
1.10352 ++ } \
1.10353 ++ while (0)
1.10354 ++#endif
1.10355 ++
1.10356 ++
1.10357 ++
1.10358 ++/*
1.10359 ++A C compound statement that attempts to replace X with a valid
1.10360 ++memory address for an operand of mode MODE. WIN will be a
1.10361 ++C statement label elsewhere in the code; the macro definition may use
1.10362 ++
1.10363 ++ GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
1.10364 ++
1.10365 ++to avoid further processing if the address has become legitimate.
1.10366 ++
1.10367 ++X will always be the result of a call to break_out_memory_refs,
1.10368 ++and OLDX will be the operand that was given to that function to produce
1.10369 ++X.
1.10370 ++
1.10371 ++The code generated by this macro should not alter the substructure of
1.10372 ++X. If it transforms X into a more legitimate form, it
1.10373 ++should assign X (which will always be a C variable) a new value.
1.10374 ++
1.10375 ++It is not necessary for this macro to come up with a legitimate
1.10376 ++address. The compiler has standard ways of doing so in all cases. In
1.10377 ++fact, it is safe for this macro to do nothing. But often a
1.10378 ++machine-dependent strategy can generate better code.
1.10379 ++*/
1.10380 ++#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
1.10381 ++ do \
1.10382 ++ { \
1.10383 ++ if (GET_CODE(X) == PLUS \
1.10384 ++ && GET_CODE(XEXP(X, 0)) == REG \
1.10385 ++ && GET_CODE(XEXP(X, 1)) == CONST_INT \
1.10386 ++ && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \
1.10387 ++ 'K', "Ks16")) \
1.10388 ++ { \
1.10389 ++ rtx index = force_reg(SImode, XEXP(X, 1)); \
1.10390 ++ X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \
1.10391 ++ } \
1.10392 ++ GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \
1.10393 ++ } \
1.10394 ++ while(0)
1.10395 ++
1.10396 ++
1.10397 ++/*
1.10398 ++A C statement or compound statement with a conditional
1.10399 ++goto LABEL; executed if memory address X (an RTX) can have
1.10400 ++different meanings depending on the machine mode of the memory
1.10401 ++reference it is used for or if the address is valid for some modes
1.10402 ++but not others.
1.10403 ++
1.10404 ++Autoincrement and autodecrement addresses typically have mode-dependent
1.10405 ++effects because the amount of the increment or decrement is the size
1.10406 ++of the operand being addressed. Some machines have other mode-dependent
1.10407 ++addresses. Many RISC machines have no mode-dependent addresses.
1.10408 ++
1.10409 ++You may assume that ADDR is a valid address for the machine.
1.10410 ++*/
1.10411 ++#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
1.10412 ++ do \
1.10413 ++ { \
1.10414 ++ if (GET_CODE (ADDR) == POST_INC \
1.10415 ++ || GET_CODE (ADDR) == PRE_DEC) \
1.10416 ++ goto LABEL; \
1.10417 ++ } \
1.10418 ++ while (0)
1.10419 ++
1.10420 ++/*
1.10421 ++A C expression that is nonzero if X is a legitimate constant for
1.10422 ++an immediate operand on the target machine. You can assume that
1.10423 ++X satisfies CONSTANT_P, so you need not check this. In fact,
1.10424 ++'1' is a suitable definition for this macro on machines where
1.10425 ++anything CONSTANT_P is valid.
1.10426 ++*/
1.10427 ++#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
1.10428 ++
1.10429 ++
1.10430 ++/******************************************************************************
1.10431 ++ * Condition Code Status
1.10432 ++ *****************************************************************************/
1.10433 ++
1.10434 ++/*
1.10435 ++C code for a data type which is used for declaring the mdep
1.10436 ++component of cc_status. It defaults to int.
1.10437 ++
1.10438 ++This macro is not used on machines that do not use cc0.
1.10439 ++*/
1.10440 ++
1.10441 ++typedef struct
1.10442 ++{
1.10443 ++ int flags;
1.10444 ++ rtx value;
1.10445 ++ int fpflags;
1.10446 ++ rtx fpvalue;
1.10447 ++ int cond_exec_cmp_clobbered;
1.10448 ++} avr32_status_reg;
1.10449 ++
1.10450 ++
1.10451 ++#define CC_STATUS_MDEP avr32_status_reg
1.10452 ++
1.10453 ++/*
1.10454 ++A C expression to initialize the mdep field to "empty".
1.10455 ++The default definition does nothing, since most machines don't use
1.10456 ++the field anyway. If you want to use the field, you should probably
1.10457 ++define this macro to initialize it.
1.10458 ++
1.10459 ++This macro is not used on machines that do not use cc0.
1.10460 ++*/
1.10461 ++
1.10462 ++#define CC_STATUS_MDEP_INIT \
1.10463 ++ (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0)
1.10464 ++
1.10465 ++#define FPCC_STATUS_INIT \
1.10466 ++ (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0)
1.10467 ++
1.10468 ++/*
1.10469 ++A C compound statement to set the components of cc_status
1.10470 ++appropriately for an insn INSN whose body is EXP. It is
1.10471 ++this macro's responsibility to recognize insns that set the condition
1.10472 ++code as a byproduct of other activity as well as those that explicitly
1.10473 ++set (cc0).
1.10474 ++
1.10475 ++This macro is not used on machines that do not use cc0.
1.10476 ++
1.10477 ++If there are insns that do not set the condition code but do alter
1.10478 ++other machine registers, this macro must check to see whether they
1.10479 ++invalidate the expressions that the condition code is recorded as
1.10480 ++reflecting. For example, on the 68000, insns that store in address
1.10481 ++registers do not set the condition code, which means that usually
1.10482 ++NOTICE_UPDATE_CC can leave cc_status unaltered for such
1.10483 ++insns. But suppose that the previous insn set the condition code
1.10484 ++based on location 'a4@(102)' and the current insn stores a new
1.10485 ++value in 'a4'. Although the condition code is not changed by
1.10486 ++this, it will no longer be true that it reflects the contents of
1.10487 ++'a4@(102)'. Therefore, NOTICE_UPDATE_CC must alter
1.10488 ++cc_status in this case to say that nothing is known about the
1.10489 ++condition code value.
1.10490 ++
1.10491 ++The definition of NOTICE_UPDATE_CC must be prepared to deal
1.10492 ++with the results of peephole optimization: insns whose patterns are
1.10493 ++parallel RTXs containing various reg, mem or
1.10494 ++constants which are just the operands. The RTL structure of these
1.10495 ++insns is not sufficient to indicate what the insns actually do. What
1.10496 ++NOTICE_UPDATE_CC should do when it sees one is just to run
1.10497 ++CC_STATUS_INIT.
1.10498 ++
1.10499 ++A possible definition of NOTICE_UPDATE_CC is to call a function
1.10500 ++that looks at an attribute (see Insn Attributes) named, for example,
1.10501 ++'cc'. This avoids having detailed information about patterns in
1.10502 ++two places, the 'md' file and in NOTICE_UPDATE_CC.
1.10503 ++*/
1.10504 ++
1.10505 ++#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
1.10506 ++
1.10507 ++
1.10508 ++
1.10509 ++
1.10510 ++/******************************************************************************
1.10511 ++ * Describing Relative Costs of Operations
1.10512 ++ *****************************************************************************/
1.10513 ++
1.10514 ++
1.10515 ++
1.10516 ++/*
1.10517 ++A C expression for the cost of moving data of mode MODE from a
1.10518 ++register in class FROM to one in class TO. The classes are
1.10519 ++expressed using the enumeration values such as GENERAL_REGS. A
1.10520 ++value of 2 is the default; other values are interpreted relative to
1.10521 ++that.
1.10522 ++
1.10523 ++It is not required that the cost always equal 2 when FROM is the
1.10524 ++same as TO; on some machines it is expensive to move between
1.10525 ++registers if they are not general registers.
1.10526 ++
1.10527 ++If reload sees an insn consisting of a single set between two
1.10528 ++hard registers, and if REGISTER_MOVE_COST applied to their
1.10529 ++classes returns a value of 2, reload does not check to ensure that the
1.10530 ++constraints of the insn are met. Setting a cost of other than 2 will
1.10531 ++allow reload to verify that the constraints are met. You should do this
1.10532 ++if the movm pattern's constraints do not allow such copying.
1.10533 ++*/
1.10534 ++#define REGISTER_MOVE_COST(MODE, FROM, TO) \
1.10535 ++ ((GET_MODE_SIZE(MODE) <= 4) ? 2: \
1.10536 ++ (GET_MODE_SIZE(MODE) <= 8) ? 3: \
1.10537 ++ 4)
1.10538 ++
1.10539 ++/*
1.10540 ++A C expression for the cost of moving data of mode MODE between a
1.10541 ++register of class CLASS and memory; IN is zero if the value
1.10542 ++is to be written to memory, nonzero if it is to be read in. This cost
1.10543 ++is relative to those in REGISTER_MOVE_COST. If moving between
1.10544 ++registers and memory is more expensive than between two registers, you
1.10545 ++should define this macro to express the relative cost.
1.10546 ++
1.10547 ++If you do not define this macro, GCC uses a default cost of 4 plus
1.10548 ++the cost of copying via a secondary reload register, if one is
1.10549 ++needed. If your machine requires a secondary reload register to copy
1.10550 ++between memory and a register of CLASS but the reload mechanism is
1.10551 ++more complex than copying via an intermediate, define this macro to
1.10552 ++reflect the actual cost of the move.
1.10553 ++
1.10554 ++GCC defines the function memory_move_secondary_cost if
1.10555 ++secondary reloads are needed. It computes the costs due to copying via
1.10556 ++a secondary register. If your machine copies from memory using a
1.10557 ++secondary register in the conventional way but the default base value of
1.10558 ++4 is not correct for your machine, define this macro to add some other
1.10559 ++value to the result of that function. The arguments to that function
1.10560 ++are the same as to this macro.
1.10561 ++*/
1.10562 ++/*
1.10563 ++ Memory moves are costly
1.10564 ++*/
1.10565 ++#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
1.10566 ++ (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \
1.10567 ++ (GET_MODE_SIZE(MODE) > 8) ? 6 : \
1.10568 ++ 3) \
1.10569 ++ : ((GET_MODE_SIZE(MODE) > 8) ? 6 : 3)))
1.10570 ++
1.10571 ++/*
1.10572 ++A C expression for the cost of a branch instruction. A value of 1 is
1.10573 ++the default; other values are interpreted relative to that.
1.10574 ++*/
1.10575 ++ /* Try to use conditionals as much as possible */
1.10576 ++#define BRANCH_COST (TARGET_BRANCH_PRED ? 3 : 4)
1.10577 ++
1.10578 ++/*A C expression for the maximum number of instructions to execute via conditional
1.10579 ++ execution instructions instead of a branch. A value of BRANCH_COST+1 is the default
1.10580 ++ if the machine does not use cc0, and 1 if it does use cc0.*/
1.10581 ++#define MAX_CONDITIONAL_EXECUTE 4
1.10582 ++
1.10583 ++/*
1.10584 ++Define this macro as a C expression which is nonzero if accessing less
1.10585 ++than a word of memory (i.e.: a char or a short) is no
1.10586 ++faster than accessing a word of memory, i.e., if such access
1.10587 ++requires more than one instruction or if there is no difference in cost
1.10588 ++between byte and (aligned) word loads.
1.10589 ++
1.10590 ++When this macro is not defined, the compiler will access a field by
1.10591 ++finding the smallest containing object; when it is defined, a fullword
1.10592 ++load will be used if alignment permits. Unless byte accesses are
1.10593 ++faster than word accesses, using word accesses is preferable since it
1.10594 ++may eliminate subsequent memory access if subsequent accesses occur to
1.10595 ++other fields in the same word of the structure, but to different bytes.
1.10596 ++*/
1.10597 ++#define SLOW_BYTE_ACCESS 1
1.10598 ++
1.10599 ++
1.10600 ++/*
1.10601 ++Define this macro if it is as good or better to call a constant
1.10602 ++function address than to call an address kept in a register.
1.10603 ++*/
1.10604 ++#define NO_FUNCTION_CSE
1.10605 ++
1.10606 ++
1.10607 ++/******************************************************************************
1.10608 ++ * Adjusting the Instruction Scheduler
1.10609 ++ *****************************************************************************/
1.10610 ++
1.10611 ++/*****************************************************************************
1.10612 ++ * Dividing the Output into Sections (Texts, Data, ...) *
1.10613 ++ *****************************************************************************/
1.10614 ++
1.10615 ++/*
1.10616 ++A C expression whose value is a string, including spacing, containing the
1.10617 ++assembler operation that should precede instructions and read-only data.
1.10618 ++Normally "\t.text" is right.
1.10619 ++*/
1.10620 ++#define TEXT_SECTION_ASM_OP "\t.text"
1.10621 ++/*
1.10622 ++A C statement that switches to the default section containing instructions.
1.10623 ++Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
1.10624 ++is enough. The MIPS port uses this to sort all functions after all data
1.10625 ++declarations.
1.10626 ++*/
1.10627 ++/* #define TEXT_SECTION */
1.10628 ++
1.10629 ++/*
1.10630 ++A C expression whose value is a string, including spacing, containing the
1.10631 ++assembler operation to identify the following data as writable initialized
1.10632 ++data. Normally "\t.data" is right.
1.10633 ++*/
1.10634 ++#define DATA_SECTION_ASM_OP "\t.data"
1.10635 ++
1.10636 ++/*
1.10637 ++If defined, a C expression whose value is a string, including spacing,
1.10638 ++containing the assembler operation to identify the following data as
1.10639 ++shared data. If not defined, DATA_SECTION_ASM_OP will be used.
1.10640 ++*/
1.10641 ++
1.10642 ++/*
1.10643 ++A C expression whose value is a string, including spacing, containing
1.10644 ++the assembler operation to identify the following data as read-only
1.10645 ++initialized data.
1.10646 ++*/
1.10647 ++#undef READONLY_DATA_SECTION_ASM_OP
1.10648 ++#define READONLY_DATA_SECTION_ASM_OP \
1.10649 ++ ((TARGET_USE_RODATA_SECTION) ? \
1.10650 ++ "\t.section\t.rodata" : \
1.10651 ++ TEXT_SECTION_ASM_OP )
1.10652 ++
1.10653 ++
1.10654 ++/*
1.10655 ++If defined, a C expression whose value is a string, including spacing,
1.10656 ++containing the assembler operation to identify the following data as
1.10657 ++uninitialized global data. If not defined, and neither
1.10658 ++ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
1.10659 ++uninitialized global data will be output in the data section if
1.10660 ++-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
1.10661 ++used.
1.10662 ++*/
1.10663 ++#define BSS_SECTION_ASM_OP "\t.section\t.bss"
1.10664 ++
1.10665 ++/*
1.10666 ++If defined, a C expression whose value is a string, including spacing,
1.10667 ++containing the assembler operation to identify the following data as
1.10668 ++uninitialized global shared data. If not defined, and
1.10669 ++BSS_SECTION_ASM_OP is, the latter will be used.
1.10670 ++*/
1.10671 ++/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
1.10672 ++/*
1.10673 ++If defined, a C expression whose value is a string, including spacing,
1.10674 ++containing the assembler operation to identify the following data as
1.10675 ++initialization code. If not defined, GCC will assume such a section does
1.10676 ++not exist.
1.10677 ++*/
1.10678 ++#undef INIT_SECTION_ASM_OP
1.10679 ++#define INIT_SECTION_ASM_OP "\t.section\t.init"
1.10680 ++
1.10681 ++/*
1.10682 ++If defined, a C expression whose value is a string, including spacing,
1.10683 ++containing the assembler operation to identify the following data as
1.10684 ++finalization code. If not defined, GCC will assume such a section does
1.10685 ++not exist.
1.10686 ++*/
1.10687 ++#undef FINI_SECTION_ASM_OP
1.10688 ++#define FINI_SECTION_ASM_OP "\t.section\t.fini"
1.10689 ++
1.10690 ++/*
1.10691 ++If defined, an ASM statement that switches to a different section
1.10692 ++via SECTION_OP, calls FUNCTION, and switches back to
1.10693 ++the text section. This is used in crtstuff.c if
1.10694 ++INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, for calls
1.10695 ++to initialization and finalization functions from the init and fini
1.10696 ++sections. By default, this macro uses a simple function call. Some
1.10697 ++ports need hand-crafted assembly code to avoid dependencies on
1.10698 ++registers initialized in the function prologue or to ensure that
1.10699 ++constant pools don't end up too far away in the text section.
1.10700 ++*/
1.10701 ++#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
1.10702 ++ asm ( SECTION_OP "\n" \
1.10703 ++ "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
1.10704 ++ TEXT_SECTION_ASM_OP);
1.10705 ++
1.10706 ++
1.10707 ++/*
1.10708 ++Define this macro to be an expression with a nonzero value if jump
1.10709 ++tables (for tablejump insns) should be output in the text
1.10710 ++section, along with the assembler instructions. Otherwise, the
1.10711 ++readonly data section is used.
1.10712 ++
1.10713 ++This macro is irrelevant if there is no separate readonly data section.
1.10714 ++*/
1.10715 ++/* Put jump tables in text section if we have caches. Otherwise assume that
1.10716 ++ loading data from code memory is slow. */
1.10717 ++#define JUMP_TABLES_IN_TEXT_SECTION \
1.10718 ++ (TARGET_CACHES ? 1 : 0)
1.10719 ++
1.10720 ++
1.10721 ++/******************************************************************************
1.10722 ++ * Position Independent Code (PIC)
1.10723 ++ *****************************************************************************/
1.10724 ++
1.10725 ++#ifndef AVR32_ALWAYS_PIC
1.10726 ++#define AVR32_ALWAYS_PIC 0
1.10727 ++#endif
1.10728 ++
1.10729 ++/* GOT is set to r6 */
1.10730 ++#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
1.10731 ++
1.10732 ++/*
1.10733 ++A C expression that is nonzero if X is a legitimate immediate
1.10734 ++operand on the target machine when generating position independent code.
1.10735 ++You can assume that X satisfies CONSTANT_P, so you need not
1.10736 ++check this. You can also assume flag_pic is true, so you need not
1.10737 ++check it either. You need not define this macro if all constants
1.10738 ++(including SYMBOL_REF) can be immediate operands when generating
1.10739 ++position independent code.
1.10740 ++*/
1.10741 ++/* We can't directly access anything that contains a symbol,
1.10742 ++ nor can we indirect via the constant pool. */
1.10743 ++#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
1.10744 ++
1.10745 ++
1.10746 ++/* We need to know when we are making a constant pool; this determines
1.10747 ++ whether data needs to be in the GOT or can be referenced via a GOT
1.10748 ++ offset. */
1.10749 ++extern int making_const_table;
1.10750 ++
1.10751 ++/******************************************************************************
1.10752 ++ * Defining the Output Assembler Language
1.10753 ++ *****************************************************************************/
1.10754 ++
1.10755 ++
1.10756 ++/*
1.10757 ++A C string constant describing how to begin a comment in the target
1.10758 ++assembler language. The compiler assumes that the comment will end at
1.10759 ++the end of the line.
1.10760 ++*/
1.10761 ++#define ASM_COMMENT_START "# "
1.10762 ++
1.10763 ++/*
1.10764 ++A C string constant for text to be output before each asm
1.10765 ++statement or group of consecutive ones. Normally this is
1.10766 ++"#APP", which is a comment that has no effect on most
1.10767 ++assemblers but tells the GNU assembler that it must check the lines
1.10768 ++that follow for all valid assembler constructs.
1.10769 ++*/
1.10770 ++#undef ASM_APP_ON
1.10771 ++#define ASM_APP_ON "#APP\n"
1.10772 ++
1.10773 ++/*
1.10774 ++A C string constant for text to be output after each asm
1.10775 ++statement or group of consecutive ones. Normally this is
1.10776 ++"#NO_APP", which tells the GNU assembler to resume making the
1.10777 ++time-saving assumptions that are valid for ordinary compiler output.
1.10778 ++*/
1.10779 ++#undef ASM_APP_OFF
1.10780 ++#define ASM_APP_OFF "#NO_APP\n"
1.10781 ++
1.10782 ++
1.10783 ++
1.10784 ++#define FILE_ASM_OP "\t.file\n"
1.10785 ++#define IDENT_ASM_OP "\t.ident\t"
1.10786 ++#define SET_ASM_OP "\t.set\t"
1.10787 ++
1.10788 ++
1.10789 ++/*
1.10790 ++ * Output assembly directives to switch to section name. The section
1.10791 ++ * should have attributes as specified by flags, which is a bit mask
1.10792 ++ * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
1.10793 ++ * it contains an alignment in bytes to be used for the section,
1.10794 ++ * otherwise some target default should be used. Only targets that
1.10795 ++ * must specify an alignment within the section directive need pay
1.10796 ++ * attention to align -- we will still use ASM_OUTPUT_ALIGN.
1.10797 ++ *
1.10798 ++ * NOTE: This one must not be moved to avr32.c
1.10799 ++ */
1.10800 ++#undef TARGET_ASM_NAMED_SECTION
1.10801 ++#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
1.10802 ++
1.10803 ++
1.10804 ++/*
1.10805 ++You may define this macro as a C expression. You should define the
1.10806 ++expression to have a nonzero value if GCC should output the constant
1.10807 ++pool for a function before the code for the function, or a zero value if
1.10808 ++GCC should output the constant pool after the function. If you do
1.10809 ++not define this macro, the usual case, GCC will output the constant
1.10810 ++pool before the function.
1.10811 ++*/
1.10812 ++#define CONSTANT_POOL_BEFORE_FUNCTION 0
1.10813 ++
1.10814 ++
1.10815 ++/*
1.10816 ++Define this macro as a C expression which is nonzero if the constant
1.10817 ++EXP, of type tree, should be output after the code for a
1.10818 ++function. The compiler will normally output all constants before the
1.10819 ++function; you need not define this macro if this is OK.
1.10820 ++*/
1.10821 ++#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
1.10822 ++
1.10823 ++
1.10824 ++/*
1.10825 ++Define this macro as a C expression which is nonzero if C is
1.10826 ++used as a logical line separator by the assembler.
1.10827 ++
1.10828 ++If you do not define this macro, the default is that only
1.10829 ++the character ';' is treated as a logical line separator.
1.10830 ++*/
1.10831 ++#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n')
1.10832 ++
1.10833 ++
1.10834 ++/** Output of Uninitialized Variables **/
1.10835 ++
1.10836 ++/*
1.10837 ++A C statement (sans semicolon) to output to the stdio stream
1.10838 ++STREAM the assembler definition of a common-label named
1.10839 ++NAME whose size is SIZE bytes. The variable ROUNDED
1.10840 ++is the size rounded up to whatever alignment the caller wants.
1.10841 ++
1.10842 ++Use the expression assemble_name(STREAM, NAME) to
1.10843 ++output the name itself; before and after that, output the additional
1.10844 ++assembler syntax for defining the name, and a newline.
1.10845 ++
1.10846 ++This macro controls how the assembler definitions of uninitialized
1.10847 ++common global variables are output.
1.10848 ++*/
1.10849 ++/*
1.10850 ++#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
1.10851 ++ avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
1.10852 ++*/
1.10853 ++
1.10854 ++#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
1.10855 ++ do \
1.10856 ++ { \
1.10857 ++ fputs ("\t.comm ", (FILE)); \
1.10858 ++ assemble_name ((FILE), (NAME)); \
1.10859 ++ fprintf ((FILE), ",%d\n", (SIZE)); \
1.10860 ++ } \
1.10861 ++ while (0)
1.10862 ++
1.10863 ++/*
1.10864 ++ * Like ASM_OUTPUT_BSS except takes the required alignment as a
1.10865 ++ * separate, explicit argument. If you define this macro, it is used
1.10866 ++ * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
1.10867 ++ * handling the required alignment of the variable. The alignment is
1.10868 ++ * specified as the number of bits.
1.10869 ++ *
1.10870 ++ * Try to use function asm_output_aligned_bss defined in file varasm.c
1.10871 ++ * when defining this macro.
1.10872 ++ */
1.10873 ++#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
1.10874 ++ asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
1.10875 ++
1.10876 ++/*
1.10877 ++A C statement (sans semicolon) to output to the stdio stream
1.10878 ++STREAM the assembler definition of a local-common-label named
1.10879 ++NAME whose size is SIZE bytes. The variable ROUNDED
1.10880 ++is the size rounded up to whatever alignment the caller wants.
1.10881 ++
1.10882 ++Use the expression assemble_name(STREAM, NAME) to
1.10883 ++output the name itself; before and after that, output the additional
1.10884 ++assembler syntax for defining the name, and a newline.
1.10885 ++
1.10886 ++This macro controls how the assembler definitions of uninitialized
1.10887 ++static variables are output.
1.10888 ++*/
1.10889 ++#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
1.10890 ++ do \
1.10891 ++ { \
1.10892 ++ fputs ("\t.lcomm ", (FILE)); \
1.10893 ++ assemble_name ((FILE), (NAME)); \
1.10894 ++ fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \
1.10895 ++ } \
1.10896 ++ while (0)
1.10897 ++
1.10898 ++
1.10899 ++/*
1.10900 ++A C statement (sans semicolon) to output to the stdio stream
1.10901 ++STREAM the assembler definition of a label named NAME.
1.10902 ++Use the expression assemble_name(STREAM, NAME) to
1.10903 ++output the name itself; before and after that, output the additional
1.10904 ++assembler syntax for defining the name, and a newline.
1.10905 ++*/
1.10906 ++#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
1.10907 ++
1.10908 ++/* A C string containing the appropriate assembler directive to
1.10909 ++ * specify the size of a symbol, without any arguments. On systems
1.10910 ++ * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
1.10911 ++ * on other systems, the default is not to define this macro.
1.10912 ++ *
1.10913 ++ * Define this macro only if it is correct to use the default
1.10914 ++ * definitions of ASM_ OUTPUT_SIZE_DIRECTIVE and
1.10915 ++ * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
1.10916 ++ * custom definitions of those macros, or if you do not need explicit
1.10917 ++ * symbol sizes at all, do not define this macro.
1.10918 ++ */
1.10919 ++#define SIZE_ASM_OP "\t.size\t"
1.10920 ++
1.10921 ++
1.10922 ++/*
1.10923 ++A C statement (sans semicolon) to output to the stdio stream
1.10924 ++STREAM some commands that will make the label NAME global;
1.10925 ++that is, available for reference from other files. Use the expression
1.10926 ++assemble_name(STREAM, NAME) to output the name
1.10927 ++itself; before and after that, output the additional assembler syntax
1.10928 ++for making that name global, and a newline.
1.10929 ++*/
1.10930 ++#define GLOBAL_ASM_OP "\t.globl\t"
1.10931 ++
1.10932 ++
1.10933 ++
1.10934 ++/*
1.10935 ++A C expression which evaluates to true if the target supports weak symbols.
1.10936 ++
1.10937 ++If you don't define this macro, defaults.h provides a default
1.10938 ++definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
1.10939 ++is defined, the default definition is '1'; otherwise, it is
1.10940 ++'0'. Define this macro if you want to control weak symbol support
1.10941 ++with a compiler flag such as -melf.
1.10942 ++*/
1.10943 ++#define SUPPORTS_WEAK 1
1.10944 ++
1.10945 ++/*
1.10946 ++A C statement (sans semicolon) to output to the stdio stream
1.10947 ++STREAM a reference in assembler syntax to a label named
1.10948 ++NAME. This should add '_' to the front of the name, if that
1.10949 ++is customary on your operating system, as it is in most Berkeley Unix
1.10950 ++systems. This macro is used in assemble_name.
1.10951 ++*/
1.10952 ++#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
1.10953 ++ avr32_asm_output_labelref(STREAM, NAME)
1.10954 ++
1.10955 ++
1.10956 ++
1.10957 ++/*
1.10958 ++A C expression to assign to OUTVAR (which is a variable of type
1.10959 ++char *) a newly allocated string made from the string
1.10960 ++NAME and the number NUMBER, with some suitable punctuation
1.10961 ++added. Use alloca to get space for the string.
1.10962 ++
1.10963 ++The string will be used as an argument to ASM_OUTPUT_LABELREF to
1.10964 ++produce an assembler label for an internal static variable whose name is
1.10965 ++NAME. Therefore, the string must be such as to result in valid
1.10966 ++assembler code. The argument NUMBER is different each time this
1.10967 ++macro is executed; it prevents conflicts between similarly-named
1.10968 ++internal static variables in different scopes.
1.10969 ++
1.10970 ++Ideally this string should not be a valid C identifier, to prevent any
1.10971 ++conflict with the user's own symbols. Most assemblers allow periods
1.10972 ++or percent signs in assembler symbols; putting at least one of these
1.10973 ++between the name and the number will suffice.
1.10974 ++*/
1.10975 ++#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \
1.10976 ++ do \
1.10977 ++ { \
1.10978 ++ (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \
1.10979 ++ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \
1.10980 ++ } \
1.10981 ++ while (0)
1.10982 ++
1.10983 ++
1.10984 ++/** Macros Controlling Initialization Routines **/
1.10985 ++
1.10986 ++
1.10987 ++/*
1.10988 ++If defined, main will not call __main as described above.
1.10989 ++This macro should be defined for systems that control start-up code
1.10990 ++on a symbol-by-symbol basis, such as OSF/1, and should not
1.10991 ++be defined explicitly for systems that support INIT_SECTION_ASM_OP.
1.10992 ++*/
1.10993 ++/*
1.10994 ++ __main is not defined when debugging.
1.10995 ++*/
1.10996 ++#define HAS_INIT_SECTION
1.10997 ++
1.10998 ++
1.10999 ++/** Output of Assembler Instructions **/
1.11000 ++
1.11001 ++/*
1.11002 ++A C initializer containing the assembler's names for the machine
1.11003 ++registers, each one as a C string constant. This is what translates
1.11004 ++register numbers in the compiler into assembler language.
1.11005 ++*/
1.11006 ++
1.11007 ++#define REGISTER_NAMES \
1.11008 ++{ \
1.11009 ++ "pc", "lr", \
1.11010 ++ "sp", "r12", \
1.11011 ++ "r11", "r10", \
1.11012 ++ "r9", "r8", \
1.11013 ++ "r7", "r6", \
1.11014 ++ "r5", "r4", \
1.11015 ++ "r3", "r2", \
1.11016 ++ "r1", "r0", \
1.11017 ++ "f15","f14", \
1.11018 ++ "f13","f12", \
1.11019 ++ "f11","f10", \
1.11020 ++ "f9", "f8", \
1.11021 ++ "f7", "f6", \
1.11022 ++ "f5", "f4", \
1.11023 ++ "f3", "f2", \
1.11024 ++ "f1", "f0" \
1.11025 ++}
1.11026 ++
1.11027 ++/*
1.11028 ++A C compound statement to output to stdio stream STREAM the
1.11029 ++assembler syntax for an instruction operand X. X is an
1.11030 ++RTL expression.
1.11031 ++
1.11032 ++CODE is a value that can be used to specify one of several ways
1.11033 ++of printing the operand. It is used when identical operands must be
1.11034 ++printed differently depending on the context. CODE comes from
1.11035 ++the '%' specification that was used to request printing of the
1.11036 ++operand. If the specification was just '%digit' then
1.11037 ++CODE is 0; if the specification was '%ltr digit'
1.11038 ++then CODE is the ASCII code for ltr.
1.11039 ++
1.11040 ++If X is a register, this macro should print the register's name.
1.11041 ++The names can be found in an array reg_names whose type is
1.11042 ++char *[]. reg_names is initialized from REGISTER_NAMES.
1.11043 ++
1.11044 ++When the machine description has a specification '%punct'
1.11045 ++(a '%' followed by a punctuation character), this macro is called
1.11046 ++with a null pointer for X and the punctuation character for
1.11047 ++CODE.
1.11048 ++*/
1.11049 ++#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
1.11050 ++
1.11051 ++/* A C statement to be executed just prior to the output of
1.11052 ++ assembler code for INSN, to modify the extracted operands so
1.11053 ++ they will be output differently.
1.11054 ++
1.11055 ++ Here the argument OPVEC is the vector containing the operands
1.11056 ++ extracted from INSN, and NOPERANDS is the number of elements of
1.11057 ++ the vector which contain meaningful data for this insn.
1.11058 ++ The contents of this vector are what will be used to convert the insn
1.11059 ++ template into assembler code, so you can change the assembler output
1.11060 ++ by changing the contents of the vector. */
1.11061 ++#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
1.11062 ++ avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
1.11063 ++
1.11064 ++/*
1.11065 ++A C expression which evaluates to true if CODE is a valid
1.11066 ++punctuation character for use in the PRINT_OPERAND macro. If
1.11067 ++PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
1.11068 ++punctuation characters (except for the standard one, '%') are used
1.11069 ++in this way.
1.11070 ++*/
1.11071 ++#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
1.11072 ++ (((CODE) == '?') \
1.11073 ++ || ((CODE) == '!'))
1.11074 ++
1.11075 ++/*
1.11076 ++A C compound statement to output to stdio stream STREAM the
1.11077 ++assembler syntax for an instruction operand that is a memory reference
1.11078 ++whose address is X. X is an RTL expression.
1.11079 ++
1.11080 ++On some machines, the syntax for a symbolic address depends on the
1.11081 ++section that the address refers to. On these machines, define the macro
1.11082 ++ENCODE_SECTION_INFO to store the information into the
1.11083 ++symbol_ref, and then check for it here. (see Assembler Format.)
1.11084 ++*/
1.11085 ++#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
1.11086 ++
1.11087 ++
1.11088 ++/** Output of Dispatch Tables **/
1.11089 ++
1.11090 ++/*
1.11091 ++ * A C statement to output to the stdio stream stream an assembler
1.11092 ++ * pseudo-instruction to generate a difference between two
1.11093 ++ * labels. value and rel are the numbers of two internal labels. The
1.11094 ++ * definitions of these labels are output using
1.11095 ++ * (*targetm.asm_out.internal_label), and they must be printed in the
1.11096 ++ * same way here. For example,
1.11097 ++ *
1.11098 ++ * fprintf (stream, "\t.word L%d-L%d\n",
1.11099 ++ * value, rel)
1.11100 ++ *
1.11101 ++ * You must provide this macro on machines where the addresses in a
1.11102 ++ * dispatch table are relative to the table's own address. If defined,
1.11103 ++ * GCC will also use this macro on all machines when producing
1.11104 ++ * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that
1.11105 ++ * the mode and flags can be read.
1.11106 ++ */
1.11107 ++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
1.11108 ++ fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
1.11109 ++
1.11110 ++/*
1.11111 ++This macro should be provided on machines where the addresses
1.11112 ++in a dispatch table are absolute.
1.11113 ++
1.11114 ++The definition should be a C statement to output to the stdio stream
1.11115 ++STREAM an assembler pseudo-instruction to generate a reference to
1.11116 ++a label. VALUE is the number of an internal label whose
1.11117 ++definition is output using ASM_OUTPUT_INTERNAL_LABEL.
1.11118 ++For example,
1.11119 ++
1.11120 ++fprintf(STREAM, "\t.word L%d\n", VALUE)
1.11121 ++*/
1.11122 ++
1.11123 ++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
1.11124 ++ fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
1.11125 ++
1.11126 ++/** Assembler Commands for Exception Regions */
1.11127 ++
1.11128 ++/* ToDo: All of this subsection */
1.11129 ++
1.11130 ++/** Assembler Commands for Alignment */
1.11131 ++
1.11132 ++
1.11133 ++/*
1.11134 ++A C statement to output to the stdio stream STREAM an assembler
1.11135 ++command to advance the location counter to a multiple of 2 to the
1.11136 ++POWER bytes. POWER will be a C expression of type int.
1.11137 ++*/
1.11138 ++#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
1.11139 ++ do \
1.11140 ++ { \
1.11141 ++ if ((POWER) != 0) \
1.11142 ++ fprintf(STREAM, "\t.align\t%d\n", POWER); \
1.11143 ++ } \
1.11144 ++ while (0)
1.11145 ++
1.11146 ++/*
1.11147 ++Like ASM_OUTPUT_ALIGN, except that the "nop" instruction is used for padding, if
1.11148 ++necessary.
1.11149 ++*/
1.11150 ++#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
1.11151 ++ fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER))
1.11152 ++
1.11153 ++
1.11154 ++
1.11155 ++/******************************************************************************
1.11156 ++ * Controlling Debugging Information Format
1.11157 ++ *****************************************************************************/
1.11158 ++
1.11159 ++/* How to renumber registers for dbx and gdb. */
1.11160 ++#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
1.11161 ++
1.11162 ++/* The DWARF 2 CFA column which tracks the return address. */
1.11163 ++#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
1.11164 ++
1.11165 ++/*
1.11166 ++Define this macro if GCC should produce dwarf version 2 format
1.11167 ++debugging output in response to the -g option.
1.11168 ++
1.11169 ++To support optional call frame debugging information, you must also
1.11170 ++define INCOMING_RETURN_ADDR_RTX and either set
1.11171 ++RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
1.11172 ++prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
1.11173 ++as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
1.11174 ++*/
1.11175 ++#define DWARF2_DEBUGGING_INFO 1
1.11176 ++
1.11177 ++
1.11178 ++#define DWARF2_ASM_LINE_DEBUG_INFO 1
1.11179 ++#define DWARF2_FRAME_INFO 1
1.11180 ++
1.11181 ++
1.11182 ++/******************************************************************************
1.11183 ++ * Miscellaneous Parameters
1.11184 ++ *****************************************************************************/
1.11185 ++
1.11186 ++/* ToDo: a lot */
1.11187 ++
1.11188 ++/*
1.11189 ++An alias for a machine mode name. This is the machine mode that
1.11190 ++elements of a jump-table should have.
1.11191 ++*/
1.11192 ++#define CASE_VECTOR_MODE SImode
1.11193 ++
1.11194 ++/*
1.11195 ++Define this macro to be a C expression to indicate when jump-tables
1.11196 ++should contain relative addresses. If jump-tables never contain
1.11197 ++relative addresses, then you need not define this macro.
1.11198 ++*/
1.11199 ++#define CASE_VECTOR_PC_RELATIVE 0
1.11200 ++
1.11201 ++/* Increase the threshold for using table jumps on the UC arch. */
1.11202 ++#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7)
1.11203 ++
1.11204 ++/*
1.11205 ++The maximum number of bytes that a single instruction can move quickly
1.11206 ++between memory and registers or between two memory locations.
1.11207 ++*/
1.11208 ++#define MOVE_MAX (2*UNITS_PER_WORD)
1.11209 ++
1.11210 ++
1.11211 ++/* A C expression that is nonzero if on this machine the number of bits actually used
1.11212 ++ for the count of a shift operation is equal to the number of bits needed to represent
1.11213 ++ the size of the object being shifted. When this macro is nonzero, the compiler will
1.11214 ++ assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
1.11215 ++ instructions that truncates the count of a shift operation. On machines that have
1.11216 ++ instructions that act on bit-fields at variable positions, which may include 'bit test'
1.11217 ++
1.11218 ++ instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations
1.11219 ++ of the values that serve as arguments to bit-field instructions.
1.11220 ++ If both types of instructions truncate the count (for shifts) and position (for bit-field
1.11221 ++ operations), or if no variable-position bit-field instructions exist, you should define
1.11222 ++ this macro.
1.11223 ++ However, on some machines, such as the 80386 and the 680x0, truncation only applies
1.11224 ++ to shift operations and not the (real or pretended) bit-field operations. Define SHIFT_
1.11225 ++ COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md' file
1.11226 ++ that include the implied truncation of the shift instructions.
1.11227 ++   You need not define this macro if it would always have the value of zero. */
1.11228 ++#define SHIFT_COUNT_TRUNCATED 1
1.11229 ++
1.11230 ++/*
1.11231 ++A C expression which is nonzero if on this machine it is safe to
1.11232 ++convert an integer of INPREC bits to one of OUTPREC
1.11233 ++bits (where OUTPREC is smaller than INPREC) by merely
1.11234 ++operating on it as if it had only OUTPREC bits.
1.11235 ++
1.11236 ++On many machines, this expression can be 1.
1.11237 ++
1.11238 ++When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
1.11239 ++modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
1.11240 ++If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
1.11241 ++such cases may improve things.
1.11242 ++*/
1.11243 ++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
1.11244 ++
1.11245 ++/*
1.11246 ++An alias for the machine mode for pointers. On most machines, define
1.11247 ++this to be the integer mode corresponding to the width of a hardware
1.11248 ++pointer; SImode on 32-bit machine or DImode on 64-bit machines.
1.11249 ++On some machines you must define this to be one of the partial integer
1.11250 ++modes, such as PSImode.
1.11251 ++
1.11252 ++The width of Pmode must be at least as large as the value of
1.11253 ++POINTER_SIZE. If it is not equal, you must define the macro
1.11254 ++POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
1.11255 ++to Pmode.
1.11256 ++*/
1.11257 ++#define Pmode SImode
1.11258 ++
1.11259 ++/*
1.11260 ++An alias for the machine mode used for memory references to functions
1.11261 ++being called, in call RTL expressions. On most machines this
1.11262 ++should be QImode.
1.11263 ++*/
1.11264 ++#define FUNCTION_MODE SImode
1.11265 ++
1.11266 ++
1.11267 ++#define REG_S_P(x) \
1.11268 ++ (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
1.11269 ++
1.11270 ++
1.11271 ++/* If defined, modifies the length assigned to instruction INSN as a
1.11272 ++ function of the context in which it is used. LENGTH is an lvalue
1.11273 ++ that contains the initially computed length of the insn and should
1.11274 ++ be updated with the correct length of the insn. */
1.11275 ++#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
1.11276 ++ ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
1.11277 ++
1.11278 ++
1.11279 ++#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
1.11280 ++ (value = 32, (mode == SImode))
1.11281 ++
1.11282 ++#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
1.11283 ++ (value = 32, (mode == SImode))
1.11284 ++
1.11285 ++#define UNITS_PER_SIMD_WORD UNITS_PER_WORD
1.11286 ++
1.11287 ++#define STORE_FLAG_VALUE 1
1.11288 ++
1.11289 ++
1.11290 ++/* IF-conversion macros. */
1.11291 ++#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \
1.11292 ++ { \
1.11293 ++ (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \
1.11294 ++ }
1.11295 ++
1.11296 ++#define IFCVT_EXTRA_FIELDS \
1.11297 ++ int num_cond_clobber_insns; \
1.11298 ++ int num_extra_move_insns; \
1.11299 ++ rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \
1.11300 ++ rtx moved_insns[MAX_CONDITIONAL_EXECUTE];
1.11301 ++
1.11302 ++#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \
1.11303 ++ { \
1.11304 ++ (CE_INFO)->num_cond_clobber_insns = 0; \
1.11305 ++ (CE_INFO)->num_extra_move_insns = 0; \
1.11306 ++ }
1.11307 ++
1.11308 ++
1.11309 ++#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes)
1.11310 ++
1.11311 ++#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1
1.11312 ++#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD)
1.11313 ++
1.11314 ++enum avr32_builtins
1.11315 ++{
1.11316 ++ AVR32_BUILTIN_MTSR,
1.11317 ++ AVR32_BUILTIN_MFSR,
1.11318 ++ AVR32_BUILTIN_MTDR,
1.11319 ++ AVR32_BUILTIN_MFDR,
1.11320 ++ AVR32_BUILTIN_CACHE,
1.11321 ++ AVR32_BUILTIN_SYNC,
1.11322 ++ AVR32_BUILTIN_SSRF,
1.11323 ++ AVR32_BUILTIN_CSRF,
1.11324 ++ AVR32_BUILTIN_TLBR,
1.11325 ++ AVR32_BUILTIN_TLBS,
1.11326 ++ AVR32_BUILTIN_TLBW,
1.11327 ++ AVR32_BUILTIN_BREAKPOINT,
1.11328 ++ AVR32_BUILTIN_XCHG,
1.11329 ++ AVR32_BUILTIN_LDXI,
1.11330 ++ AVR32_BUILTIN_BSWAP16,
1.11331 ++ AVR32_BUILTIN_BSWAP32,
1.11332 ++ AVR32_BUILTIN_COP,
1.11333 ++ AVR32_BUILTIN_MVCR_W,
1.11334 ++ AVR32_BUILTIN_MVRC_W,
1.11335 ++ AVR32_BUILTIN_MVCR_D,
1.11336 ++ AVR32_BUILTIN_MVRC_D,
1.11337 ++ AVR32_BUILTIN_MULSATHH_H,
1.11338 ++ AVR32_BUILTIN_MULSATHH_W,
1.11339 ++ AVR32_BUILTIN_MULSATRNDHH_H,
1.11340 ++ AVR32_BUILTIN_MULSATRNDWH_W,
1.11341 ++ AVR32_BUILTIN_MULSATWH_W,
1.11342 ++ AVR32_BUILTIN_MACSATHH_W,
1.11343 ++ AVR32_BUILTIN_SATADD_H,
1.11344 ++ AVR32_BUILTIN_SATSUB_H,
1.11345 ++ AVR32_BUILTIN_SATADD_W,
1.11346 ++ AVR32_BUILTIN_SATSUB_W,
1.11347 ++ AVR32_BUILTIN_MULWH_D,
1.11348 ++ AVR32_BUILTIN_MULNWH_D,
1.11349 ++ AVR32_BUILTIN_MACWH_D,
1.11350 ++ AVR32_BUILTIN_MACHH_D,
1.11351 ++ AVR32_BUILTIN_MUSFR,
1.11352 ++ AVR32_BUILTIN_MUSTR,
1.11353 ++ AVR32_BUILTIN_SATS,
1.11354 ++ AVR32_BUILTIN_SATU,
1.11355 ++ AVR32_BUILTIN_SATRNDS,
1.11356 ++ AVR32_BUILTIN_SATRNDU,
1.11357 ++ AVR32_BUILTIN_MEMS,
1.11358 ++ AVR32_BUILTIN_MEMC,
1.11359 ++ AVR32_BUILTIN_MEMT
1.11360 ++};
1.11361 ++
1.11362 ++
1.11363 ++#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
1.11364 ++ ((MODE == SFmode) || (MODE == DFmode))
1.11365 ++
1.11366 ++#define RENAME_LIBRARY_SET ".set"
1.11367 ++
1.11368 ++/* Make ABI_NAME an alias for __GCC_NAME. */
1.11369 ++#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \
1.11370 ++ __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \
1.11371 ++ ".set\t__avr32_" #ABI_NAME \
1.11372 ++ ", __" #GCC_NAME "\n");
1.11373 ++
1.11374 ++/* Give libgcc functions avr32 ABI name. */
1.11375 ++#ifdef L_muldi3
1.11376 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
1.11377 ++#endif
1.11378 ++#ifdef L_divdi3
1.11379 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
1.11380 ++#endif
1.11381 ++#ifdef L_udivdi3
1.11382 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
1.11383 ++#endif
1.11384 ++#ifdef L_moddi3
1.11385 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
1.11386 ++#endif
1.11387 ++#ifdef L_umoddi3
1.11388 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
1.11389 ++#endif
1.11390 ++#ifdef L_ashldi3
1.11391 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
1.11392 ++#endif
1.11393 ++#ifdef L_lshrdi3
1.11394 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
1.11395 ++#endif
1.11396 ++#ifdef L_ashrdi3
1.11397 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
1.11398 ++#endif
1.11399 ++
1.11400 ++#ifdef L_fixsfdi
1.11401 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
1.11402 ++#endif
1.11403 ++#ifdef L_fixunssfdi
1.11404 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
1.11405 ++#endif
1.11406 ++#ifdef L_floatdidf
1.11407 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
1.11408 ++#endif
1.11409 ++#ifdef L_floatdisf
1.11410 ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
1.11411 ++#endif
1.11412 ++
1.11413 ++#endif
1.11414 +--- /dev/null
1.11415 ++++ b/gcc/config/avr32/avr32.md
1.11416 +@@ -0,0 +1,4893 @@
1.11417 ++;; AVR32 machine description file.
1.11418 ++;; Copyright 2003-2006 Atmel Corporation.
1.11419 ++;;
1.11420 ++;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.11421 ++;;
1.11422 ++;; This file is part of GCC.
1.11423 ++;;
1.11424 ++;; This program is free software; you can redistribute it and/or modify
1.11425 ++;; it under the terms of the GNU General Public License as published by
1.11426 ++;; the Free Software Foundation; either version 2 of the License, or
1.11427 ++;; (at your option) any later version.
1.11428 ++;;
1.11429 ++;; This program is distributed in the hope that it will be useful,
1.11430 ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
1.11431 ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.11432 ++;; GNU General Public License for more details.
1.11433 ++;;
1.11434 ++;; You should have received a copy of the GNU General Public License
1.11435 ++;; along with this program; if not, write to the Free Software
1.11436 ++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
1.11437 ++
1.11438 ++;; -*- Mode: Scheme -*-
1.11439 ++
1.11440 ++(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
1.11441 ++ (const_string "alu"))
1.11442 ++
1.11443 ++
1.11444 ++(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
1.11445 ++ (const_string "none"))
1.11446 ++
1.11447 ++
1.11448 ++; NB! Keep this in sync with enum architecture_type in avr32.h
1.11449 ++(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul"
1.11450 ++ (const (symbol_ref "avr32_arch->arch_type")))
1.11451 ++
1.11452 ++; Insn length in bytes
1.11453 ++(define_attr "length" ""
1.11454 ++ (const_int 4))
1.11455 ++
1.11456 ++; Signal if an insn is predicable and hence can be conditionally executed.
1.11457 ++(define_attr "predicable" "no,yes" (const_string "no"))
1.11458 ++
1.11459 ++;; Uses of UNSPEC in this file:
1.11460 ++(define_constants
1.11461 ++ [(UNSPEC_PUSHM 0)
1.11462 ++ (UNSPEC_POPM 1)
1.11463 ++ (UNSPEC_UDIVMODSI4_INTERNAL 2)
1.11464 ++ (UNSPEC_DIVMODSI4_INTERNAL 3)
1.11465 ++ (UNSPEC_STM 4)
1.11466 ++ (UNSPEC_LDM 5)
1.11467 ++ (UNSPEC_MOVSICC 6)
1.11468 ++ (UNSPEC_ADDSICC 7)
1.11469 ++ (UNSPEC_COND_MI 8)
1.11470 ++ (UNSPEC_COND_PL 9)
1.11471 ++ (UNSPEC_PIC_SYM 10)
1.11472 ++ (UNSPEC_PIC_BASE 11)
1.11473 ++ (UNSPEC_STORE_MULTIPLE 12)
1.11474 ++ (UNSPEC_STMFP 13)
1.11475 ++ (UNSPEC_FPCC_TO_REG 14)
1.11476 ++ (UNSPEC_REG_TO_CC 15)
1.11477 ++ (UNSPEC_FORCE_MINIPOOL 16)
1.11478 ++ (UNSPEC_SATS 17)
1.11479 ++ (UNSPEC_SATU 18)
1.11480 ++ (UNSPEC_SATRNDS 19)
1.11481 ++ (UNSPEC_SATRNDU 20)
1.11482 ++ ])
1.11483 ++
1.11484 ++(define_constants
1.11485 ++ [(VUNSPEC_EPILOGUE 0)
1.11486 ++ (VUNSPEC_CACHE 1)
1.11487 ++ (VUNSPEC_MTSR 2)
1.11488 ++ (VUNSPEC_MFSR 3)
1.11489 ++ (VUNSPEC_BLOCKAGE 4)
1.11490 ++ (VUNSPEC_SYNC 5)
1.11491 ++ (VUNSPEC_TLBR 6)
1.11492 ++ (VUNSPEC_TLBW 7)
1.11493 ++ (VUNSPEC_TLBS 8)
1.11494 ++ (VUNSPEC_BREAKPOINT 9)
1.11495 ++ (VUNSPEC_MTDR 10)
1.11496 ++ (VUNSPEC_MFDR 11)
1.11497 ++ (VUNSPEC_MVCR 12)
1.11498 ++ (VUNSPEC_MVRC 13)
1.11499 ++ (VUNSPEC_COP 14)
1.11500 ++ (VUNSPEC_ALIGN 15)
1.11501 ++ (VUNSPEC_POOL_START 16)
1.11502 ++ (VUNSPEC_POOL_END 17)
1.11503 ++ (VUNSPEC_POOL_4 18)
1.11504 ++ (VUNSPEC_POOL_8 19)
1.11505 ++ (VUNSPEC_POOL_16 20)
1.11506 ++ (VUNSPEC_MUSFR 21)
1.11507 ++ (VUNSPEC_MUSTR 22)
1.11508 ++ (VUNSPEC_SYNC_CMPXCHG 23)
1.11509 ++ (VUNSPEC_SYNC_SET_LOCK_AND_LOAD 24)
1.11510 ++ (VUNSPEC_SYNC_STORE_IF_LOCK 25)
1.11511 ++ (VUNSPEC_EH_RETURN 26)
1.11512 ++ (VUNSPEC_FRS 27)
1.11513 ++ (VUNSPEC_CSRF 28)
1.11514 ++ (VUNSPEC_SSRF 29)
1.11515 ++ ])
1.11516 ++
1.11517 ++(define_constants
1.11518 ++ [
1.11519 ++ ;; R7 = 15-7 = 8
1.11520 ++ (FP_REGNUM 8)
1.11521 ++ ;; Return Register = R12 = 15 - 12 = 3
1.11522 ++ (RETVAL_REGNUM 3)
1.11523 ++ ;; SP = R13 = 15 - 13 = 2
1.11524 ++ (SP_REGNUM 2)
1.11525 ++ ;; LR = R14 = 15 - 14 = 1
1.11526 ++ (LR_REGNUM 1)
1.11527 ++ ;; PC = R15 = 15 - 15 = 0
1.11528 ++ (PC_REGNUM 0)
1.11529 ++ ;; FPSR = GENERAL_REGS + 1 = 17
1.11530 ++ (FPCC_REGNUM 17)
1.11531 ++ ])
1.11532 ++
1.11533 ++
1.11534 ++
1.11535 ++
1.11536 ++;;******************************************************************************
1.11537 ++;; Macros
1.11538 ++;;******************************************************************************
1.11539 ++
1.11540 ++;; Integer Modes for basic alu insns
1.11541 ++(define_mode_macro INTM [SI HI QI])
1.11542 ++(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
1.11543 ++
1.11544 ++;; Move word modes
1.11545 ++(define_mode_macro MOVM [SI V2HI V4QI])
1.11546 ++
1.11547 ++;; For mov/addcc insns
1.11548 ++(define_mode_macro ADDCC [SI HI QI])
1.11549 ++(define_mode_macro MOVCC [SF SI HI QI])
1.11550 ++(define_mode_macro CMP [DI SI HI QI])
1.11551 ++(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")])
1.11552 ++(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")])
1.11553 ++(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")])
1.11554 ++(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")])
1.11555 ++(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")])
1.11556 ++(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")])
1.11557 ++(define_mode_attr cmp_predicate [(DI "register_immediate_operand")
1.11558 ++ (SI "register_const_int_operand")
1.11559 ++ (HI "register_operand")
1.11560 ++ (QI "register_operand")])
1.11561 ++(define_mode_attr cmp_length [(DI "6")
1.11562 ++ (SI "4")
1.11563 ++ (HI "4")
1.11564 ++ (QI "4")])
1.11565 ++
1.11566 ++;; For all conditional insns
1.11567 ++(define_code_macro any_cond [eq ne gt ge lt le gtu geu ltu leu])
1.11568 ++(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
1.11569 ++ (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
1.11570 ++(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
1.11571 ++ (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
1.11572 ++
1.11573 ++;; For logical operations
1.11574 ++(define_code_macro logical [and ior xor])
1.11575 ++(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
1.11576 ++
1.11577 ++;; Predicable operations with three register operands
1.11578 ++(define_code_macro predicable_op3 [and ior xor plus minus])
1.11579 ++(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")])
1.11580 ++(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")])
1.11581 ++
1.11582 ++;; Load the predicates
1.11583 ++(include "predicates.md")
1.11584 ++
1.11585 ++
1.11586 ++;;******************************************************************************
1.11587 ++;; Automaton pipeline description for avr32
1.11588 ++;;******************************************************************************
1.11589 ++
1.11590 ++(define_automaton "avr32_ap")
1.11591 ++
1.11592 ++
1.11593 ++(define_cpu_unit "is" "avr32_ap")
1.11594 ++(define_cpu_unit "a1,m1,da" "avr32_ap")
1.11595 ++(define_cpu_unit "a2,m2,d" "avr32_ap")
1.11596 ++
1.11597 ++;;Alu instructions
1.11598 ++(define_insn_reservation "alu_op" 1
1.11599 ++ (and (eq_attr "pipeline" "ap")
1.11600 ++ (eq_attr "type" "alu"))
1.11601 ++ "is,a1,a2")
1.11602 ++
1.11603 ++(define_insn_reservation "alu2_op" 2
1.11604 ++ (and (eq_attr "pipeline" "ap")
1.11605 ++ (eq_attr "type" "alu2"))
1.11606 ++ "is,is+a1,a1+a2,a2")
1.11607 ++
1.11608 ++(define_insn_reservation "alu_sat_op" 2
1.11609 ++ (and (eq_attr "pipeline" "ap")
1.11610 ++ (eq_attr "type" "alu_sat"))
1.11611 ++ "is,a1,a2")
1.11612 ++
1.11613 ++
1.11614 ++;;Mul instructions
1.11615 ++(define_insn_reservation "mulhh_op" 2
1.11616 ++ (and (eq_attr "pipeline" "ap")
1.11617 ++ (eq_attr "type" "mulhh,mulwh"))
1.11618 ++ "is,m1,m2")
1.11619 ++
1.11620 ++(define_insn_reservation "mulww_w_op" 3
1.11621 ++ (and (eq_attr "pipeline" "ap")
1.11622 ++ (eq_attr "type" "mulww_w"))
1.11623 ++ "is,m1,m1+m2,m2")
1.11624 ++
1.11625 ++(define_insn_reservation "mulww_d_op" 5
1.11626 ++ (and (eq_attr "pipeline" "ap")
1.11627 ++ (eq_attr "type" "mulww_d"))
1.11628 ++ "is,m1,m1+m2,m1+m2,m2,m2")
1.11629 ++
1.11630 ++(define_insn_reservation "div_op" 33
1.11631 ++ (and (eq_attr "pipeline" "ap")
1.11632 ++ (eq_attr "type" "div"))
1.11633 ++ "is,m1,m1*31 + m2*31,m2")
1.11634 ++
1.11635 ++(define_insn_reservation "machh_w_op" 3
1.11636 ++ (and (eq_attr "pipeline" "ap")
1.11637 ++ (eq_attr "type" "machh_w"))
1.11638 ++ "is*2,m1,m2")
1.11639 ++
1.11640 ++
1.11641 ++(define_insn_reservation "macww_w_op" 4
1.11642 ++ (and (eq_attr "pipeline" "ap")
1.11643 ++ (eq_attr "type" "macww_w"))
1.11644 ++ "is*2,m1,m1,m2")
1.11645 ++
1.11646 ++
1.11647 ++(define_insn_reservation "macww_d_op" 6
1.11648 ++ (and (eq_attr "pipeline" "ap")
1.11649 ++ (eq_attr "type" "macww_d"))
1.11650 ++ "is*2,m1,m1+m2,m1+m2,m2")
1.11651 ++
1.11652 ++;;Bypasses for Mac instructions, because of accumulator cache.
1.11653 ++;;Set latency as low as possible in order to let the compiler let
1.11654 ++;;mul -> mac and mac -> mac combinations which use the same
1.11655 ++;;accumulator cache be placed close together to avoid any
1.11656 ++;;instructions which can ruin the accumulator cache come inbetween.
1.11657 ++(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
1.11658 ++(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
1.11659 ++(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
1.11660 ++
1.11661 ++(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
1.11662 ++(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
1.11663 ++(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
1.11664 ++
1.11665 ++
1.11666 ++;;Bypasses for all mul/mac instructions followed by an instruction
1.11667 ++;;which reads the output AND writes the result to the same register.
1.11668 ++;;This will generate an Write After Write hazard which gives an
1.11669 ++;;extra cycle before the result is ready.
1.11670 ++(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
1.11671 ++(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
1.11672 ++(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
1.11673 ++
1.11674 ++(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
1.11675 ++(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
1.11676 ++(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
1.11677 ++
1.11678 ++;;Branch and call instructions
1.11679 ++;;We assume that all branches and rcalls are predicted correctly :-)
1.11680 ++;;while calls use a lot of cycles.
1.11681 ++(define_insn_reservation "branch_op" 0
1.11682 ++ (and (eq_attr "pipeline" "ap")
1.11683 ++ (eq_attr "type" "branch"))
1.11684 ++ "nothing")
1.11685 ++
1.11686 ++(define_insn_reservation "call_op" 10
1.11687 ++ (and (eq_attr "pipeline" "ap")
1.11688 ++ (eq_attr "type" "call"))
1.11689 ++ "nothing")
1.11690 ++
1.11691 ++
1.11692 ++;;Load store instructions
1.11693 ++(define_insn_reservation "load_op" 2
1.11694 ++ (and (eq_attr "pipeline" "ap")
1.11695 ++ (eq_attr "type" "load"))
1.11696 ++ "is,da,d")
1.11697 ++
1.11698 ++(define_insn_reservation "load_rm_op" 3
1.11699 ++ (and (eq_attr "pipeline" "ap")
1.11700 ++ (eq_attr "type" "load_rm"))
1.11701 ++ "is,da,d")
1.11702 ++
1.11703 ++
1.11704 ++(define_insn_reservation "store_op" 0
1.11705 ++ (and (eq_attr "pipeline" "ap")
1.11706 ++ (eq_attr "type" "store"))
1.11707 ++ "is,da,d")
1.11708 ++
1.11709 ++
1.11710 ++(define_insn_reservation "load_double_op" 3
1.11711 ++ (and (eq_attr "pipeline" "ap")
1.11712 ++ (eq_attr "type" "load2"))
1.11713 ++ "is,da,da+d,d")
1.11714 ++
1.11715 ++(define_insn_reservation "load_quad_op" 4
1.11716 ++ (and (eq_attr "pipeline" "ap")
1.11717 ++ (eq_attr "type" "load4"))
1.11718 ++ "is,da,da+d,da+d,d")
1.11719 ++
1.11720 ++(define_insn_reservation "store_double_op" 0
1.11721 ++ (and (eq_attr "pipeline" "ap")
1.11722 ++ (eq_attr "type" "store2"))
1.11723 ++ "is,da,da+d,d")
1.11724 ++
1.11725 ++
1.11726 ++(define_insn_reservation "store_quad_op" 0
1.11727 ++ (and (eq_attr "pipeline" "ap")
1.11728 ++ (eq_attr "type" "store4"))
1.11729 ++ "is,da,da+d,da+d,d")
1.11730 ++
1.11731 ++;;For store the operand to write to memory is read in d and
1.11732 ++;;the real latency between any instruction and a store is therefore
1.11733 ++;;one less than for the instructions which reads the operands in the first
1.11734 ++;;excecution stage
1.11735 ++(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
1.11736 ++(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
1.11737 ++(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
1.11738 ++(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
1.11739 ++(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
1.11740 ++(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
1.11741 ++(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
1.11742 ++(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
1.11743 ++(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
1.11744 ++(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
1.11745 ++(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
1.11746 ++(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
1.11747 ++
1.11748 ++
1.11749 ++; Bypass for load double operation. If only the first loaded word is needed
1.11750 ++; then the latency is 2
1.11751 ++(define_bypass 2 "load_double_op"
1.11752 ++ "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
1.11753 ++ mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
1.11754 ++ "avr32_valid_load_double_bypass")
1.11755 ++
1.11756 ++; Bypass for load quad operation. If only the first or second loaded word is needed
1.11757 ++; we set the latency to 2
1.11758 ++(define_bypass 2 "load_quad_op"
1.11759 ++ "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
1.11760 ++ mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
1.11761 ++ "avr32_valid_load_quad_bypass")
1.11762 ++
1.11763 ++
1.11764 ++;;******************************************************************************
1.11765 ++;; End of Automaton pipeline description for avr32
1.11766 ++;;******************************************************************************
1.11767 ++
1.11768 ++(define_cond_exec
1.11769 ++ [(match_operator 0 "avr32_comparison_operator"
1.11770 ++ [(match_operand:CMP 1 "register_operand" "r")
1.11771 ++ (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])]
1.11772 ++ "TARGET_V2_INSNS"
1.11773 ++ "%!"
1.11774 ++)
1.11775 ++
1.11776 ++(define_cond_exec
1.11777 ++ [(match_operator 0 "avr32_comparison_operator"
1.11778 ++ [(and:SI (match_operand:SI 1 "register_operand" "r")
1.11779 ++ (match_operand:SI 2 "one_bit_set_operand" "i"))
1.11780 ++ (const_int 0)])]
1.11781 ++ "TARGET_V2_INSNS"
1.11782 ++ "%!"
1.11783 ++ )
1.11784 ++
1.11785 ++;;=============================================================================
1.11786 ++;; move
1.11787 ++;;-----------------------------------------------------------------------------
1.11788 ++
1.11789 ++
1.11790 ++;;== char - 8 bits ============================================================
1.11791 ++(define_expand "movqi"
1.11792 ++ [(set (match_operand:QI 0 "nonimmediate_operand" "")
1.11793 ++ (match_operand:QI 1 "general_operand" ""))]
1.11794 ++ ""
1.11795 ++ {
1.11796 ++ if ( !no_new_pseudos ){
1.11797 ++ if (GET_CODE (operands[1]) == MEM && optimize){
1.11798 ++ rtx reg = gen_reg_rtx (SImode);
1.11799 ++
1.11800 ++ emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
1.11801 ++ operands[1] = gen_lowpart (QImode, reg);
1.11802 ++ }
1.11803 ++
1.11804 ++ /* One of the ops has to be in a register. */
1.11805 ++ if (GET_CODE (operands[0]) == MEM)
1.11806 ++ operands[1] = force_reg (QImode, operands[1]);
1.11807 ++ }
1.11808 ++
1.11809 ++ })
1.11810 ++
1.11811 ++(define_insn "*movqi_internal"
1.11812 ++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
1.11813 ++ (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
1.11814 ++ "register_operand (operands[0], QImode)
1.11815 ++ || register_operand (operands[1], QImode)"
1.11816 ++ "@
1.11817 ++ mov\t%0, %1
1.11818 ++ ld.ub\t%0, %1
1.11819 ++ st.b\t%0, %1
1.11820 ++ mov\t%0, %1"
1.11821 ++ [(set_attr "length" "2,4,4,4")
1.11822 ++ (set_attr "type" "alu,load_rm,store,alu")])
1.11823 ++
1.11824 ++
1.11825 ++
1.11826 ++;;== short - 16 bits ==========================================================
1.11827 ++(define_expand "movhi"
1.11828 ++ [(set (match_operand:HI 0 "nonimmediate_operand" "")
1.11829 ++ (match_operand:HI 1 "general_operand" ""))]
1.11830 ++ ""
1.11831 ++ {
1.11832 ++ if ( !no_new_pseudos ){
1.11833 ++ if (GET_CODE (operands[1]) == MEM && optimize){
1.11834 ++ rtx reg = gen_reg_rtx (SImode);
1.11835 ++
1.11836 ++ emit_insn (gen_extendhisi2 (reg, operands[1]));
1.11837 ++ operands[1] = gen_lowpart (HImode, reg);
1.11838 ++ }
1.11839 ++
1.11840 ++ /* One of the ops has to be in a register. */
1.11841 ++ if (GET_CODE (operands[0]) == MEM)
1.11842 ++ operands[1] = force_reg (HImode, operands[1]);
1.11843 ++ }
1.11844 ++
1.11845 ++ })
1.11846 ++
1.11847 ++
1.11848 ++(define_insn "*movhi_internal"
1.11849 ++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
1.11850 ++ (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
1.11851 ++ "register_operand (operands[0], HImode)
1.11852 ++ || register_operand (operands[1], HImode)"
1.11853 ++ "@
1.11854 ++ mov\t%0, %1
1.11855 ++ ld.sh\t%0, %1
1.11856 ++ st.h\t%0, %1
1.11857 ++ mov\t%0, %1"
1.11858 ++ [(set_attr "length" "2,4,4,4")
1.11859 ++ (set_attr "type" "alu,load_rm,store,alu")])
1.11860 ++
1.11861 ++
1.11862 ++;;== int - 32 bits ============================================================
1.11863 ++
1.11864 ++(define_expand "movmisalignsi"
1.11865 ++ [(set (match_operand:SI 0 "nonimmediate_operand" "")
1.11866 ++ (match_operand:SI 1 "nonimmediate_operand" ""))]
1.11867 ++ "TARGET_UNALIGNED_WORD"
1.11868 ++ {
1.11869 ++ }
1.11870 ++)
1.11871 ++
1.11872 ++
1.11873 ++(define_expand "mov<mode>"
1.11874 ++ [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "")
1.11875 ++ (match_operand:MOVM 1 "avr32_non_rmw_general_operand" ""))]
1.11876 ++ ""
1.11877 ++ {
1.11878 ++
1.11879 ++ /* One of the ops has to be in a register. */
1.11880 ++ if (GET_CODE (operands[0]) == MEM)
1.11881 ++ operands[1] = force_reg (<MODE>mode, operands[1]);
1.11882 ++
1.11883 ++
1.11884 ++ /* Check for out of range immediate constants as these may
1.11885 ++ occur during reloading, since it seems like reload does
1.11886 ++ not check if the immediate is legitimate. Don't know if
1.11887 ++ this is a bug? */
1.11888 ++ if ( reload_in_progress
1.11889 ++ && avr32_imm_in_const_pool
1.11890 ++ && GET_CODE(operands[1]) == CONST_INT
1.11891 ++ && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
1.11892 ++ operands[1] = force_const_mem(SImode, operands[1]);
1.11893 ++ }
1.11894 ++
1.11895 ++ /* Check for RMW memory operands. They are not allowed for mov operations
1.11896 ++ only the atomic memc/s/t operations */
1.11897 ++ if ( !reload_in_progress
1.11898 ++ && avr32_rmw_memory_operand (operands[0], <MODE>mode) ){
1.11899 ++ operands[0] = copy_rtx (operands[0]);
1.11900 ++ XEXP(operands[0], 0) = force_reg (<MODE>mode, XEXP(operands[0], 0));
1.11901 ++ }
1.11902 ++
1.11903 ++ if ( !reload_in_progress
1.11904 ++ && avr32_rmw_memory_operand (operands[1], <MODE>mode) ){
1.11905 ++ operands[1] = copy_rtx (operands[1]);
1.11906 ++ XEXP(operands[1], 0) = force_reg (<MODE>mode, XEXP(operands[1], 0));
1.11907 ++ }
1.11908 ++
1.11909 ++ if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
1.11910 ++ && !avr32_legitimate_pic_operand_p(operands[1]) )
1.11911 ++ operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
1.11912 ++ (no_new_pseudos ? operands[0] : 0));
1.11913 ++ else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
1.11914 ++ /* If we have an address operand then this function uses the pic register. */
1.11915 ++ current_function_uses_pic_offset_table = 1;
1.11916 ++ })
1.11917 ++
1.11918 ++
1.11919 ++
1.11920 ++(define_insn "mov<mode>_internal"
1.11921 ++ [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "=r, r, r,r,r,Q,r")
1.11922 ++ (match_operand:MOVM 1 "avr32_non_rmw_general_operand" "rKs08,Ks21,J,n,Q,r,W"))]
1.11923 ++ "(register_operand (operands[0], <MODE>mode)
1.11924 ++ || register_operand (operands[1], <MODE>mode))
1.11925 ++ && !avr32_rmw_memory_operand (operands[0], <MODE>mode)
1.11926 ++ && !avr32_rmw_memory_operand (operands[1], <MODE>mode)"
1.11927 ++ {
1.11928 ++ switch (which_alternative) {
1.11929 ++ case 0:
1.11930 ++ case 1: return "mov\t%0, %1";
1.11931 ++ case 2:
1.11932 ++ if ( TARGET_V2_INSNS )
1.11933 ++ return "movh\t%0, hi(%1)";
1.11934 ++ /* Fallthrough */
1.11935 ++ case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
1.11936 ++ case 4:
1.11937 ++ if ( (REG_P(XEXP(operands[1], 0))
1.11938 ++ && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
1.11939 ++ || (GET_CODE(XEXP(operands[1], 0)) == PLUS
1.11940 ++ && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
1.11941 ++ && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
1.11942 ++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
1.11943 ++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
1.11944 ++ return "lddsp\t%0, %1";
1.11945 ++ else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
1.11946 ++ return "lddpc\t%0, %1";
1.11947 ++ else
1.11948 ++ return "ld.w\t%0, %1";
1.11949 ++ case 5:
1.11950 ++ if ( (REG_P(XEXP(operands[0], 0))
1.11951 ++ && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
1.11952 ++ || (GET_CODE(XEXP(operands[0], 0)) == PLUS
1.11953 ++ && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
1.11954 ++ && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
1.11955 ++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
1.11956 ++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
1.11957 ++ return "stdsp\t%0, %1";
1.11958 ++ else
1.11959 ++ return "st.w\t%0, %1";
1.11960 ++ case 6:
1.11961 ++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
1.11962 ++ return "lda.w\t%0, %1";
1.11963 ++ else
1.11964 ++ return "ld.w\t%0, r6[%1@got]";
1.11965 ++ default:
1.11966 ++ abort();
1.11967 ++ }
1.11968 ++ }
1.11969 ++
1.11970 ++ [(set_attr "length" "2,4,4,8,4,4,8")
1.11971 ++ (set_attr "type" "alu,alu,alu,alu2,load,store,load")
1.11972 ++ (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")])
1.11973 ++
1.11974 ++
1.11975 ++(define_expand "reload_out_rmw_memory_operand"
1.11976 ++ [(set (match_operand:SI 2 "register_operand" "=r")
1.11977 ++ (match_operand:SI 0 "address_operand" ""))
1.11978 ++ (set (mem:SI (match_dup 2))
1.11979 ++ (match_operand:SI 1 "register_operand" ""))]
1.11980 ++ ""
1.11981 ++ {
1.11982 ++ operands[0] = XEXP(operands[0], 0);
1.11983 ++ }
1.11984 ++)
1.11985 ++
1.11986 ++(define_expand "reload_in_rmw_memory_operand"
1.11987 ++ [(set (match_operand:SI 2 "register_operand" "=r")
1.11988 ++ (match_operand:SI 1 "address_operand" ""))
1.11989 ++ (set (match_operand:SI 0 "register_operand" "")
1.11990 ++ (mem:SI (match_dup 2)))]
1.11991 ++ ""
1.11992 ++ {
1.11993 ++ operands[1] = XEXP(operands[1], 0);
1.11994 ++ }
1.11995 ++)
1.11996 ++
1.11997 ++
1.11998 ++;; These instructions are for loading constants which cannot be loaded
1.11999 ++;; directly from the constant pool because the offset is too large
1.12000 ++;; high and lo_sum are used even though for our case it should be
1.12001 ++;; low and high sum :-)
1.12002 ++(define_insn "mov_symbol_lo"
1.12003 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.12004 ++ (high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
1.12005 ++ ""
1.12006 ++ "mov\t%0, lo(%1)"
1.12007 ++ [(set_attr "type" "alu")
1.12008 ++ (set_attr "length" "4")]
1.12009 ++)
1.12010 ++
1.12011 ++(define_insn "add_symbol_hi"
1.12012 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.12013 ++ (lo_sum:SI (match_dup 0)
1.12014 ++ (match_operand:SI 1 "immediate_operand" "i" )))]
1.12015 ++ ""
1.12016 ++ "orh\t%0, hi(%1)"
1.12017 ++ [(set_attr "type" "alu")
1.12018 ++ (set_attr "length" "4")]
1.12019 ++)
1.12020 ++
1.12021 ++
1.12022 ++
1.12023 ++;; When generating pic, we need to load the symbol offset into a register.
1.12024 ++;; So that the optimizer does not confuse this with a normal symbol load
1.12025 ++;; we use an unspec. The offset will be loaded from a constant pool entry,
1.12026 ++;; since that is the only type of relocation we can use.
1.12027 ++(define_insn "pic_load_addr"
1.12028 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.12029 ++ (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
1.12030 ++ "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
1.12031 ++ "lddpc\t%0, %1"
1.12032 ++ [(set_attr "type" "load")
1.12033 ++ (set_attr "length" "4")]
1.12034 ++)
1.12035 ++
1.12036 ++(define_insn "pic_compute_got_from_pc"
1.12037 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.12038 ++ (unspec:SI [(minus:SI (pc)
1.12039 ++ (match_dup 0))] UNSPEC_PIC_BASE))
1.12040 ++ (use (label_ref (match_operand 1 "" "")))]
1.12041 ++ "flag_pic"
1.12042 ++ {
1.12043 ++ (*targetm.asm_out.internal_label) (asm_out_file, "L",
1.12044 ++ CODE_LABEL_NUMBER (operands[1]));
1.12045 ++ return \"rsub\t%0, pc\";
1.12046 ++ }
1.12047 ++ [(set_attr "cc" "clobber")
1.12048 ++ (set_attr "length" "2")]
1.12049 ++)
1.12050 ++
1.12051 ++;;== long long int - 64 bits ==================================================
1.12052 ++
1.12053 ++(define_expand "movdi"
1.12054 ++ [(set (match_operand:DI 0 "nonimmediate_operand" "")
1.12055 ++ (match_operand:DI 1 "general_operand" ""))]
1.12056 ++ ""
1.12057 ++ {
1.12058 ++
1.12059 ++ /* One of the ops has to be in a register. */
1.12060 ++ if (GET_CODE (operands[0]) != REG)
1.12061 ++ operands[1] = force_reg (DImode, operands[1]);
1.12062 ++
1.12063 ++ })
1.12064 ++
1.12065 ++
1.12066 ++(define_insn_and_split "*movdi_internal"
1.12067 ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m")
1.12068 ++ (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))]
1.12069 ++ "register_operand (operands[0], DImode)
1.12070 ++ || register_operand (operands[1], DImode)"
1.12071 ++ {
1.12072 ++ switch (which_alternative ){
1.12073 ++ case 0:
1.12074 ++ case 1:
1.12075 ++ case 2:
1.12076 ++ case 3:
1.12077 ++ case 4:
1.12078 ++ return "#";
1.12079 ++ case 5:
1.12080 ++ if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
1.12081 ++ return "ld.d\t%0, pc[%1 - .]";
1.12082 ++ else
1.12083 ++ return "ld.d\t%0, %1";
1.12084 ++ case 6:
1.12085 ++ return "st.d\t%0, %1";
1.12086 ++ default:
1.12087 ++ abort();
1.12088 ++ }
1.12089 ++ }
1.12090 ++;; Let's split all reg->reg or imm->reg transfers into two SImode transfers
1.12091 ++ "reload_completed &&
1.12092 ++ (REG_P (operands[0]) &&
1.12093 ++ (REG_P (operands[1])
1.12094 ++ || GET_CODE (operands[1]) == CONST_INT
1.12095 ++ || GET_CODE (operands[1]) == CONST_DOUBLE))"
1.12096 ++ [(set (match_dup 0) (match_dup 1))
1.12097 ++ (set (match_dup 2) (match_dup 3))]
1.12098 ++ {
1.12099 ++ operands[2] = gen_highpart (SImode, operands[0]);
1.12100 ++ operands[0] = gen_lowpart (SImode, operands[0]);
1.12101 ++ if ( REG_P(operands[1]) ){
1.12102 ++ operands[3] = gen_highpart(SImode, operands[1]);
1.12103 ++ operands[1] = gen_lowpart(SImode, operands[1]);
1.12104 ++ } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
1.12105 ++ || GET_CODE(operands[1]) == CONST_INT ){
1.12106 ++ rtx split_const[2];
1.12107 ++ avr32_split_const_expr (DImode, SImode, operands[1], split_const);
1.12108 ++ operands[3] = split_const[1];
1.12109 ++ operands[1] = split_const[0];
1.12110 ++ } else {
1.12111 ++ internal_error("Illegal operand[1] for movdi split!");
1.12112 ++ }
1.12113 ++ }
1.12114 ++
1.12115 ++ [(set_attr "length" "*,*,*,*,*,4,4")
1.12116 ++ (set_attr "type" "*,*,*,*,*,load2,store2")
1.12117 ++ (set_attr "cc" "*,*,*,*,*,none,none")])
1.12118 ++
1.12119 ++
1.12120 ++;;== 128 bits ==================================================
1.12121 ++(define_expand "movti"
1.12122 ++ [(set (match_operand:TI 0 "nonimmediate_operand" "")
1.12123 ++ (match_operand:TI 1 "nonimmediate_operand" ""))]
1.12124 ++ "TARGET_ARCH_AP"
1.12125 ++ {
1.12126 ++
1.12127 ++ /* One of the ops has to be in a register. */
1.12128 ++ if (GET_CODE (operands[0]) != REG)
1.12129 ++ operands[1] = force_reg (TImode, operands[1]);
1.12130 ++
1.12131 ++    /* We must fix up any pre_dec loads and post_inc stores. */
1.12132 ++ if ( GET_CODE (operands[0]) == MEM
1.12133 ++ && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
1.12134 ++ emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
1.12135 ++ emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
1.12136 ++ DONE;
1.12137 ++ }
1.12138 ++
1.12139 ++ if ( GET_CODE (operands[1]) == MEM
1.12140 ++ && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
1.12141 ++ emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
1.12142 ++ emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
1.12143 ++ DONE;
1.12144 ++ }
1.12145 ++ })
1.12146 ++
1.12147 ++
1.12148 ++(define_insn_and_split "*movti_internal"
1.12149 ++ [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r")
1.12150 ++ (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))]
1.12151 ++ "(register_operand (operands[0], TImode)
1.12152 ++ || register_operand (operands[1], TImode))"
1.12153 ++ {
1.12154 ++ switch (which_alternative ){
1.12155 ++ case 0:
1.12156 ++ case 2:
1.12157 ++ case 4:
1.12158 ++ return "#";
1.12159 ++ case 1:
1.12160 ++ return "ldm\t%p1, %0";
1.12161 ++ case 3:
1.12162 ++ return "stm\t%p0, %1";
1.12163 ++ case 5:
1.12164 ++ return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]";
1.12165 ++ }
1.12166 ++ }
1.12167 ++
1.12168 ++ "reload_completed &&
1.12169 ++ (REG_P (operands[0]) &&
1.12170 ++ (REG_P (operands[1])
1.12171 ++ /* If this is a load from the constant pool we split it into
1.12172 ++ two double loads. */
1.12173 ++ || (GET_CODE (operands[1]) == MEM
1.12174 ++ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
1.12175 ++ && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
1.12176 ++ /* If this is a load where the pointer register is a part
1.12177 ++ of the register list, we must split it into two double
1.12178 ++ loads in order for it to be exception safe. */
1.12179 ++ || (GET_CODE (operands[1]) == MEM
1.12180 ++ && register_operand (XEXP (operands[1], 0), SImode)
1.12181 ++ && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
1.12182 ++ || GET_CODE (operands[1]) == CONST_INT
1.12183 ++ || GET_CODE (operands[1]) == CONST_DOUBLE))"
1.12184 ++ [(set (match_dup 0) (match_dup 1))
1.12185 ++ (set (match_dup 2) (match_dup 3))]
1.12186 ++ {
1.12187 ++ operands[2] = simplify_gen_subreg ( DImode, operands[0],
1.12188 ++ TImode, 0 );
1.12189 ++ operands[0] = simplify_gen_subreg ( DImode, operands[0],
1.12190 ++ TImode, 8 );
1.12191 ++ if ( REG_P(operands[1]) ){
1.12192 ++ operands[3] = simplify_gen_subreg ( DImode, operands[1],
1.12193 ++ TImode, 0 );
1.12194 ++ operands[1] = simplify_gen_subreg ( DImode, operands[1],
1.12195 ++ TImode, 8 );
1.12196 ++ } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
1.12197 ++ || GET_CODE(operands[1]) == CONST_INT ){
1.12198 ++ rtx split_const[2];
1.12199 ++ avr32_split_const_expr (TImode, DImode, operands[1], split_const);
1.12200 ++ operands[3] = split_const[1];
1.12201 ++ operands[1] = split_const[0];
1.12202 ++ } else if (avr32_const_pool_ref_operand (operands[1], GET_MODE(operands[1]))){
1.12203 ++ rtx split_const[2];
1.12204 ++ rtx cop = avoid_constant_pool_reference (operands[1]);
1.12205 ++ if (operands[1] == cop)
1.12206 ++ cop = get_pool_constant (XEXP (operands[1], 0));
1.12207 ++ avr32_split_const_expr (TImode, DImode, cop, split_const);
1.12208 ++ operands[3] = force_const_mem (DImode, split_const[1]);
1.12209 ++ operands[1] = force_const_mem (DImode, split_const[0]);
1.12210 ++ } else {
1.12211 ++ rtx ptr_reg = XEXP (operands[1], 0);
1.12212 ++ operands[1] = gen_rtx_MEM (DImode,
1.12213 ++ gen_rtx_PLUS ( SImode,
1.12214 ++ ptr_reg,
1.12215 ++ GEN_INT (8) ));
1.12216 ++ operands[3] = gen_rtx_MEM (DImode,
1.12217 ++ ptr_reg);
1.12218 ++
1.12219 ++ /* Check if the first load will clobber the pointer.
1.12220 ++ If so, we must switch the order of the operations. */
1.12221 ++ if ( reg_overlap_mentioned_p (operands[0], ptr_reg) )
1.12222 ++ {
1.12223 ++ /* We need to switch the order of the operations
1.12224 ++ so that the pointer register does not get clobbered
1.12225 ++ after the first double word load. */
1.12226 ++ rtx tmp;
1.12227 ++ tmp = operands[0];
1.12228 ++ operands[0] = operands[2];
1.12229 ++ operands[2] = tmp;
1.12230 ++ tmp = operands[1];
1.12231 ++ operands[1] = operands[3];
1.12232 ++ operands[3] = tmp;
1.12233 ++ }
1.12234 ++
1.12235 ++
1.12236 ++ }
1.12237 ++ }
1.12238 ++ [(set_attr "length" "*,*,4,4,*,8")
1.12239 ++ (set_attr "type" "*,*,load4,store4,*,load4")])
1.12240 ++
1.12241 ++
1.12242 ++;;== float - 32 bits ==========================================================
1.12243 ++(define_expand "movsf"
1.12244 ++ [(set (match_operand:SF 0 "nonimmediate_operand" "")
1.12245 ++ (match_operand:SF 1 "general_operand" ""))]
1.12246 ++ ""
1.12247 ++ {
1.12248 ++
1.12249 ++
1.12250 ++ /* One of the ops has to be in a register. */
1.12251 ++ if (GET_CODE (operands[0]) != REG)
1.12252 ++ operands[1] = force_reg (SFmode, operands[1]);
1.12253 ++
1.12254 ++ })
1.12255 ++
1.12256 ++(define_insn "*movsf_internal"
1.12257 ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m")
1.12258 ++ (match_operand:SF 1 "general_operand" "r, G,F,m,r"))]
1.12259 ++ "(register_operand (operands[0], SFmode)
1.12260 ++ || register_operand (operands[1], SFmode))"
1.12261 ++ {
1.12262 ++ switch (which_alternative) {
1.12263 ++ case 0:
1.12264 ++ case 1: return "mov\t%0, %1";
1.12265 ++ case 2:
1.12266 ++ {
1.12267 ++ HOST_WIDE_INT target_float[2];
1.12268 ++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
1.12269 ++ if ( TARGET_V2_INSNS
1.12270 ++ && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) )
1.12271 ++ return "movh\t%0, hi(%1)";
1.12272 ++ else
1.12273 ++ return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
1.12274 ++ }
1.12275 ++ case 3:
1.12276 ++ if ( (REG_P(XEXP(operands[1], 0))
1.12277 ++ && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
1.12278 ++ || (GET_CODE(XEXP(operands[1], 0)) == PLUS
1.12279 ++ && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
1.12280 ++ && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
1.12281 ++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
1.12282 ++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
1.12283 ++ return "lddsp\t%0, %1";
1.12284 ++ else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
1.12285 ++ return "lddpc\t%0, %1";
1.12286 ++ else
1.12287 ++ return "ld.w\t%0, %1";
1.12288 ++ case 4:
1.12289 ++ if ( (REG_P(XEXP(operands[0], 0))
1.12290 ++ && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
1.12291 ++ || (GET_CODE(XEXP(operands[0], 0)) == PLUS
1.12292 ++ && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
1.12293 ++ && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
1.12294 ++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
1.12295 ++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
1.12296 ++ return "stdsp\t%0, %1";
1.12297 ++ else
1.12298 ++ return "st.w\t%0, %1";
1.12299 ++ default:
1.12300 ++ abort();
1.12301 ++ }
1.12302 ++ }
1.12303 ++
1.12304 ++ [(set_attr "length" "2,4,8,4,4")
1.12305 ++ (set_attr "type" "alu,alu,alu2,load,store")
1.12306 ++ (set_attr "cc" "none,none,clobber,none,none")])
1.12307 ++
1.12308 ++
1.12309 ++
1.12310 ++;;== double - 64 bits =========================================================
1.12311 ++(define_expand "movdf"
1.12312 ++ [(set (match_operand:DF 0 "nonimmediate_operand" "")
1.12313 ++ (match_operand:DF 1 "general_operand" ""))]
1.12314 ++ ""
1.12315 ++ {
1.12316 ++ /* One of the ops has to be in a register. */
1.12317 ++ if (GET_CODE (operands[0]) != REG){
1.12318 ++ operands[1] = force_reg (DFmode, operands[1]);
1.12319 ++ }
1.12320 ++ })
1.12321 ++
1.12322 ++
1.12323 ++(define_insn_and_split "*movdf_internal"
1.12324 ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m")
1.12325 ++ (match_operand:DF 1 "general_operand" " r,G,F,m,r"))]
1.12326 ++ "TARGET_SOFT_FLOAT
1.12327 ++ && (register_operand (operands[0], DFmode)
1.12328 ++ || register_operand (operands[1], DFmode))"
1.12329 ++ {
1.12330 ++ switch (which_alternative ){
1.12331 ++ case 0:
1.12332 ++ case 1:
1.12333 ++ case 2:
1.12334 ++ return "#";
1.12335 ++ case 3:
1.12336 ++ if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
1.12337 ++ return "ld.d\t%0, pc[%1 - .]";
1.12338 ++ else
1.12339 ++ return "ld.d\t%0, %1";
1.12340 ++ case 4:
1.12341 ++ return "st.d\t%0, %1";
1.12342 ++ default:
1.12343 ++ abort();
1.12344 ++ }
1.12345 ++ }
1.12346 ++ "TARGET_SOFT_FLOAT
1.12347 ++ && reload_completed
1.12348 ++ && (REG_P (operands[0])
1.12349 ++ && (REG_P (operands[1])
1.12350 ++ || GET_CODE (operands[1]) == CONST_DOUBLE))"
1.12351 ++ [(set (match_dup 0) (match_dup 1))
1.12352 ++ (set (match_dup 2) (match_dup 3))]
1.12353 ++ "
1.12354 ++ {
1.12355 ++ operands[2] = gen_highpart (SImode, operands[0]);
1.12356 ++ operands[0] = gen_lowpart (SImode, operands[0]);
1.12357 ++ operands[3] = gen_highpart(SImode, operands[1]);
1.12358 ++ operands[1] = gen_lowpart(SImode, operands[1]);
1.12359 ++ }
1.12360 ++ "
1.12361 ++
1.12362 ++ [(set_attr "length" "*,*,*,4,4")
1.12363 ++ (set_attr "type" "*,*,*,load2,store2")
1.12364 ++ (set_attr "cc" "*,*,*,none,none")])
1.12365 ++
1.12366 ++
1.12367 ++;;=============================================================================
1.12368 ++;; Conditional Moves
1.12369 ++;;=============================================================================
1.12370 ++(define_insn "ld<mode>_predicable"
1.12371 ++ [(set (match_operand:MOVCC 0 "register_operand" "=r")
1.12372 ++ (match_operand:MOVCC 1 "avr32_non_rmw_memory_operand" "<MOVCC:pred_mem_constraint>"))]
1.12373 ++ "TARGET_V2_INSNS"
1.12374 ++ "ld<MOVCC:load_postfix>%?\t%0, %1"
1.12375 ++ [(set_attr "length" "4")
1.12376 ++ (set_attr "cc" "cmp_cond_insn")
1.12377 ++ (set_attr "type" "load")
1.12378 ++ (set_attr "predicable" "yes")]
1.12379 ++)
1.12380 ++
1.12381 ++
1.12382 ++(define_insn "st<mode>_predicable"
1.12383 ++ [(set (match_operand:MOVCC 0 "avr32_non_rmw_memory_operand" "=<MOVCC:pred_mem_constraint>")
1.12384 ++ (match_operand:MOVCC 1 "register_operand" "r"))]
1.12385 ++ "TARGET_V2_INSNS"
1.12386 ++ "st<MOVCC:store_postfix>%?\t%0, %1"
1.12387 ++ [(set_attr "length" "4")
1.12388 ++ (set_attr "cc" "cmp_cond_insn")
1.12389 ++ (set_attr "type" "store")
1.12390 ++ (set_attr "predicable" "yes")]
1.12391 ++)
1.12392 ++
1.12393 ++(define_insn "mov<mode>_predicable"
1.12394 ++ [(set (match_operand:MOVCC 0 "register_operand" "=r")
1.12395 ++ (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))]
1.12396 ++ ""
1.12397 ++ "mov%?\t%0, %1"
1.12398 ++ [(set_attr "length" "4")
1.12399 ++ (set_attr "cc" "cmp_cond_insn")
1.12400 ++ (set_attr "type" "alu")
1.12401 ++ (set_attr "predicable" "yes")]
1.12402 ++)
1.12403 ++
1.12404 ++
1.12405 ++;;=============================================================================
1.12406 ++;; Move chunks of memory
1.12407 ++;;=============================================================================
1.12408 ++
1.12409 ++(define_expand "movmemsi"
1.12410 ++ [(match_operand:BLK 0 "general_operand" "")
1.12411 ++ (match_operand:BLK 1 "general_operand" "")
1.12412 ++ (match_operand:SI 2 "const_int_operand" "")
1.12413 ++ (match_operand:SI 3 "const_int_operand" "")]
1.12414 ++ ""
1.12415 ++ "
1.12416 ++ if (avr32_gen_movmemsi (operands))
1.12417 ++ DONE;
1.12418 ++ FAIL;
1.12419 ++ "
1.12420 ++ )
1.12421 ++
1.12422 ++
1.12423 ++
1.12424 ++
1.12425 ++;;=============================================================================
1.12426 ++;; Bit field instructions
1.12427 ++;;-----------------------------------------------------------------------------
1.12428 ++;; Instructions to insert or extract bit-fields
1.12429 ++;;=============================================================================
1.12430 ++
1.12431 ++(define_insn "insv"
1.12432 ++ [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
1.12433 ++ (match_operand:SI 1 "immediate_operand" "Ku05")
1.12434 ++ (match_operand:SI 2 "immediate_operand" "Ku05"))
1.12435 ++ (match_operand 3 "register_operand" "r"))]
1.12436 ++ ""
1.12437 ++ "bfins\t%0, %3, %2, %1"
1.12438 ++ [(set_attr "type" "alu")
1.12439 ++ (set_attr "length" "4")
1.12440 ++ (set_attr "cc" "set_ncz")])
1.12441 ++
1.12442 ++
1.12443 ++
1.12444 ++(define_expand "extv"
1.12445 ++ [ (set (match_operand:SI 0 "register_operand" "")
1.12446 ++ (sign_extract:SI (match_operand:SI 1 "register_operand" "")
1.12447 ++ (match_operand:SI 2 "immediate_operand" "")
1.12448 ++ (match_operand:SI 3 "immediate_operand" "")))]
1.12449 ++ ""
1.12450 ++ {
1.12451 ++ if ( INTVAL(operands[2]) >= 32 )
1.12452 ++ FAIL;
1.12453 ++ }
1.12454 ++)
1.12455 ++
1.12456 ++(define_expand "extzv"
1.12457 ++ [ (set (match_operand:SI 0 "register_operand" "")
1.12458 ++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
1.12459 ++ (match_operand:SI 2 "immediate_operand" "")
1.12460 ++ (match_operand:SI 3 "immediate_operand" "")))]
1.12461 ++ ""
1.12462 ++ {
1.12463 ++ if ( INTVAL(operands[2]) >= 32 )
1.12464 ++ FAIL;
1.12465 ++ }
1.12466 ++)
1.12467 ++
1.12468 ++(define_insn "extv_internal"
1.12469 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.12470 ++ (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
1.12471 ++ (match_operand:SI 2 "immediate_operand" "Ku05")
1.12472 ++ (match_operand:SI 3 "immediate_operand" "Ku05")))]
1.12473 ++ "INTVAL(operands[2]) < 32"
1.12474 ++ "bfexts\t%0, %1, %3, %2"
1.12475 ++ [(set_attr "type" "alu")
1.12476 ++ (set_attr "length" "4")
1.12477 ++ (set_attr "cc" "set_ncz")])
1.12478 ++
1.12479 ++
1.12480 ++(define_insn "extzv_internal"
1.12481 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.12482 ++ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
1.12483 ++ (match_operand:SI 2 "immediate_operand" "Ku05")
1.12484 ++ (match_operand:SI 3 "immediate_operand" "Ku05")))]
1.12485 ++ "INTVAL(operands[2]) < 32"
1.12486 ++ "bfextu\t%0, %1, %3, %2"
1.12487 ++ [(set_attr "type" "alu")
1.12488 ++ (set_attr "length" "4")
1.12489 ++ (set_attr "cc" "set_ncz")])
1.12490 ++
1.12491 ++
1.12492 ++
1.12493 ++;;=============================================================================
1.12494 ++;; Some peepholes for avoiding unnecessary cast instructions
1.12495 ++;; followed by bfins.
1.12496 ++;;-----------------------------------------------------------------------------
1.12497 ++
1.12498 ++(define_peephole2
1.12499 ++ [(set (match_operand:SI 0 "register_operand" "")
1.12500 ++ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
1.12501 ++ (set (zero_extract:SI (match_operand 2 "register_operand" "")
1.12502 ++ (match_operand:SI 3 "immediate_operand" "")
1.12503 ++ (match_operand:SI 4 "immediate_operand" ""))
1.12504 ++ (match_dup 0))]
1.12505 ++ "((peep2_reg_dead_p(2, operands[0]) &&
1.12506 ++ (INTVAL(operands[3]) <= 8)))"
1.12507 ++ [(set (zero_extract:SI (match_dup 2)
1.12508 ++ (match_dup 3)
1.12509 ++ (match_dup 4))
1.12510 ++ (match_dup 1))]
1.12511 ++ )
1.12512 ++
1.12513 ++(define_peephole2
1.12514 ++ [(set (match_operand:SI 0 "register_operand" "")
1.12515 ++ (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
1.12516 ++ (set (zero_extract:SI (match_operand 2 "register_operand" "")
1.12517 ++ (match_operand:SI 3 "immediate_operand" "")
1.12518 ++ (match_operand:SI 4 "immediate_operand" ""))
1.12519 ++ (match_dup 0))]
1.12520 ++ "((peep2_reg_dead_p(2, operands[0]) &&
1.12521 ++ (INTVAL(operands[3]) <= 16)))"
1.12522 ++ [(set (zero_extract:SI (match_dup 2)
1.12523 ++ (match_dup 3)
1.12524 ++ (match_dup 4))
1.12525 ++ (match_dup 1))]
1.12526 ++ )
1.12527 ++
1.12528 ++;;=============================================================================
1.12529 ++;; push bytes
1.12530 ++;;-----------------------------------------------------------------------------
1.12531 ++;; Implements the push instruction
1.12532 ++;;=============================================================================
1.12533 ++(define_insn "pushm"
1.12534 ++ [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
1.12535 ++ (unspec:BLK [(match_operand 0 "const_int_operand" "")]
1.12536 ++ UNSPEC_PUSHM))]
1.12537 ++ ""
1.12538 ++ {
1.12539 ++ if (INTVAL(operands[0])) {
1.12540 ++ return "pushm\t%r0";
1.12541 ++ } else {
1.12542 ++ return "";
1.12543 ++ }
1.12544 ++ }
1.12545 ++ [(set_attr "type" "store")
1.12546 ++ (set_attr "length" "2")
1.12547 ++ (set_attr "cc" "none")])
1.12548 ++
1.12549 ++(define_insn "stm"
1.12550 ++ [(unspec [(match_operand 0 "register_operand" "r")
1.12551 ++ (match_operand 1 "const_int_operand" "")
1.12552 ++ (match_operand 2 "const_int_operand" "")]
1.12553 ++ UNSPEC_STM)]
1.12554 ++ ""
1.12555 ++ {
1.12556 ++ if (INTVAL(operands[1])) {
1.12557 ++ if (INTVAL(operands[2]) != 0)
1.12558 ++ return "stm\t--%0, %s1";
1.12559 ++ else
1.12560 ++ return "stm\t%0, %s1";
1.12561 ++ } else {
1.12562 ++ return "";
1.12563 ++ }
1.12564 ++ }
1.12565 ++ [(set_attr "type" "store")
1.12566 ++ (set_attr "length" "4")
1.12567 ++ (set_attr "cc" "none")])
1.12568 ++
1.12569 ++
1.12570 ++
1.12571 ++(define_insn "popm"
1.12572 ++ [(unspec [(match_operand 0 "const_int_operand" "")]
1.12573 ++ UNSPEC_POPM)]
1.12574 ++ ""
1.12575 ++ {
1.12576 ++ if (INTVAL(operands[0])) {
1.12577 ++ return "popm %r0";
1.12578 ++ } else {
1.12579 ++ return "";
1.12580 ++ }
1.12581 ++ }
1.12582 ++ [(set_attr "type" "load")
1.12583 ++ (set_attr "length" "2")])
1.12584 ++
1.12585 ++
1.12586 ++
1.12587 ++;;=============================================================================
1.12588 ++;; add
1.12589 ++;;-----------------------------------------------------------------------------
1.12590 ++;; Adds reg1 with reg2 and puts the result in reg0.
1.12591 ++;;=============================================================================
1.12592 ++(define_insn "add<mode>3"
1.12593 ++ [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
1.12594 ++ (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
1.12595 ++ (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
1.12596 ++ ""
1.12597 ++ "@
1.12598 ++ add %0, %2
1.12599 ++ add %0, %1, %2
1.12600 ++ sub %0, %n2
1.12601 ++ sub %0, %1, %n2
1.12602 ++ sub %0, %n2"
1.12603 ++
1.12604 ++ [(set_attr "length" "2,4,2,4,4")
1.12605 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12606 ++
1.12607 ++(define_insn "add<mode>3_lsl"
1.12608 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.12609 ++ (plus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
1.12610 ++ (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))
1.12611 ++ (match_operand:INTM 2 "register_operand" "r")))]
1.12612 ++ ""
1.12613 ++ "add %0, %2, %1 << %3"
1.12614 ++ [(set_attr "length" "4")
1.12615 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12616 ++
1.12617 ++(define_insn "add<mode>3_lsl2"
1.12618 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.12619 ++ (plus:INTM (match_operand:INTM 1 "register_operand" "r")
1.12620 ++ (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
1.12621 ++ (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
1.12622 ++ ""
1.12623 ++ "add %0, %1, %2 << %3"
1.12624 ++ [(set_attr "length" "4")
1.12625 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12626 ++
1.12627 ++
1.12628 ++(define_insn "add<mode>3_mul"
1.12629 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.12630 ++ (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r")
1.12631 ++ (match_operand:INTM 3 "immediate_operand" "Ku04" ))
1.12632 ++ (match_operand:INTM 2 "register_operand" "r")))]
1.12633 ++ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
1.12634 ++ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
1.12635 ++ "add %0, %2, %1 << %p3"
1.12636 ++ [(set_attr "length" "4")
1.12637 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12638 ++
1.12639 ++(define_insn "add<mode>3_mul2"
1.12640 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.12641 ++ (plus:INTM (match_operand:INTM 1 "register_operand" "r")
1.12642 ++ (mult:INTM (match_operand:INTM 2 "register_operand" "r")
1.12643 ++ (match_operand:INTM 3 "immediate_operand" "Ku04" ))))]
1.12644 ++ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
1.12645 ++ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
1.12646 ++ "add %0, %1, %2 << %p3"
1.12647 ++ [(set_attr "length" "4")
1.12648 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12649 ++
1.12650 ++
1.12651 ++(define_peephole2
1.12652 ++ [(set (match_operand:SI 0 "register_operand" "")
1.12653 ++ (ashift:SI (match_operand:SI 1 "register_operand" "")
1.12654 ++ (match_operand:SI 2 "immediate_operand" "")))
1.12655 ++ (set (match_operand:SI 3 "register_operand" "")
1.12656 ++ (plus:SI (match_dup 0)
1.12657 ++ (match_operand:SI 4 "register_operand" "")))]
1.12658 ++ "(peep2_reg_dead_p(2, operands[0]) &&
1.12659 ++ (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
1.12660 ++ [(set (match_dup 3)
1.12661 ++ (plus:SI (ashift:SI (match_dup 1)
1.12662 ++ (match_dup 2))
1.12663 ++ (match_dup 4)))]
1.12664 ++ )
1.12665 ++
1.12666 ++(define_peephole2
1.12667 ++ [(set (match_operand:SI 0 "register_operand" "")
1.12668 ++ (ashift:SI (match_operand:SI 1 "register_operand" "")
1.12669 ++ (match_operand:SI 2 "immediate_operand" "")))
1.12670 ++ (set (match_operand:SI 3 "register_operand" "")
1.12671 ++ (plus:SI (match_operand:SI 4 "register_operand" "")
1.12672 ++ (match_dup 0)))]
1.12673 ++ "(peep2_reg_dead_p(2, operands[0]) &&
1.12674 ++ (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
1.12675 ++ [(set (match_dup 3)
1.12676 ++ (plus:SI (ashift:SI (match_dup 1)
1.12677 ++ (match_dup 2))
1.12678 ++ (match_dup 4)))]
1.12679 ++ )
1.12680 ++
1.12681 ++(define_insn "adddi3"
1.12682 ++ [(set (match_operand:DI 0 "register_operand" "=r,r")
1.12683 ++ (plus:DI (match_operand:DI 1 "register_operand" "%0,r")
1.12684 ++ (match_operand:DI 2 "register_operand" "r,r")))]
1.12685 ++ ""
1.12686 ++ "@
1.12687 ++ add %0, %2\;adc %m0, %m0, %m2
1.12688 ++ add %0, %1, %2\;adc %m0, %m1, %m2"
1.12689 ++ [(set_attr "length" "6,8")
1.12690 ++ (set_attr "type" "alu2")
1.12691 ++ (set_attr "cc" "set_vncz")])
1.12692 ++
1.12693 ++
1.12694 ++(define_insn "add<mode>_imm_predicable"
1.12695 ++ [(set (match_operand:INTM 0 "register_operand" "+r")
1.12696 ++ (plus:INTM (match_dup 0)
1.12697 ++ (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))]
1.12698 ++ ""
1.12699 ++ "sub%?\t%0, -%1"
1.12700 ++ [(set_attr "length" "4")
1.12701 ++ (set_attr "cc" "cmp_cond_insn")
1.12702 ++ (set_attr "predicable" "yes")]
1.12703 ++)
1.12704 ++
1.12705 ++;;=============================================================================
1.12706 ++;; subtract
1.12707 ++;;-----------------------------------------------------------------------------
1.12708 ++;; Subtract reg2 or immediate value from reg0 and puts the result in reg0.
1.12709 ++;;=============================================================================
1.12710 ++
1.12711 ++(define_insn "sub<mode>3"
1.12712 ++ [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
1.12713 ++ (minus:INTM (match_operand:INTM 1 "nonmemory_operand" "0,r,0,r,0,r,Ks08")
1.12714 ++ (match_operand:INTM 2 "nonmemory_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
1.12715 ++ ""
1.12716 ++ "@
1.12717 ++ sub %0, %2
1.12718 ++ sub %0, %1, %2
1.12719 ++ sub %0, %2
1.12720 ++ sub %0, %1, %2
1.12721 ++ sub %0, %2
1.12722 ++ rsub %0, %1
1.12723 ++ rsub %0, %2, %1"
1.12724 ++ [(set_attr "length" "2,4,2,4,4,2,4")
1.12725 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12726 ++
1.12727 ++(define_insn "*sub<mode>3_mul"
1.12728 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.12729 ++ (minus:INTM (match_operand:INTM 1 "register_operand" "r")
1.12730 ++ (mult:INTM (match_operand:INTM 2 "register_operand" "r")
1.12731 ++ (match_operand:SI 3 "immediate_operand" "Ku04" ))))]
1.12732 ++ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
1.12733 ++ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
1.12734 ++ "sub %0, %1, %2 << %p3"
1.12735 ++ [(set_attr "length" "4")
1.12736 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12737 ++
1.12738 ++(define_insn "*sub<mode>3_lsl"
1.12739 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.12740 ++ (minus:INTM (match_operand:INTM 1 "register_operand" "r")
1.12741 ++ (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
1.12742 ++ (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
1.12743 ++ ""
1.12744 ++ "sub %0, %1, %2 << %3"
1.12745 ++ [(set_attr "length" "4")
1.12746 ++ (set_attr "cc" "<INTM:alu_cc_attr>")])
1.12747 ++
1.12748 ++
1.12749 ++(define_insn "subdi3"
1.12750 ++ [(set (match_operand:DI 0 "register_operand" "=r,r")
1.12751 ++ (minus:DI (match_operand:DI 1 "register_operand" "%0,r")
1.12752 ++ (match_operand:DI 2 "register_operand" "r,r")))]
1.12753 ++ ""
1.12754 ++ "@
1.12755 ++ sub %0, %2\;sbc %m0, %m0, %m2
1.12756 ++ sub %0, %1, %2\;sbc %m0, %m1, %m2"
1.12757 ++ [(set_attr "length" "6,8")
1.12758 ++ (set_attr "type" "alu2")
1.12759 ++ (set_attr "cc" "set_vncz")])
1.12760 ++
1.12761 ++
1.12762 ++(define_insn "sub<mode>_imm_predicable"
1.12763 ++ [(set (match_operand:INTM 0 "register_operand" "+r")
1.12764 ++ (minus:INTM (match_dup 0)
1.12765 ++ (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))]
1.12766 ++ ""
1.12767 ++ "sub%?\t%0, %1"
1.12768 ++ [(set_attr "length" "4")
1.12769 ++ (set_attr "cc" "cmp_cond_insn")
1.12770 ++ (set_attr "predicable" "yes")])
1.12771 ++
1.12772 ++(define_insn "rsub<mode>_imm_predicable"
1.12773 ++ [(set (match_operand:INTM 0 "register_operand" "+r")
1.12774 ++ (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")
1.12775 ++ (match_dup 0)))]
1.12776 ++ ""
1.12777 ++ "rsub%?\t%0, %1"
1.12778 ++ [(set_attr "length" "4")
1.12779 ++ (set_attr "cc" "cmp_cond_insn")
1.12780 ++ (set_attr "predicable" "yes")])
1.12781 ++
1.12782 ++;;=============================================================================
1.12783 ++;; multiply
1.12784 ++;;-----------------------------------------------------------------------------
1.12785 ++;; Multiply op1 and op2 and put the value in op0.
1.12786 ++;;=============================================================================
1.12787 ++
1.12788 ++
1.12789 ++(define_insn "mulqi3"
1.12790 ++ [(set (match_operand:QI 0 "register_operand" "=r,r,r")
1.12791 ++ (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
1.12792 ++ (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
1.12793 ++ "!TARGET_NO_MUL_INSNS"
1.12794 ++ {
1.12795 ++ switch (which_alternative){
1.12796 ++ case 0:
1.12797 ++ return "mul %0, %2";
1.12798 ++ case 1:
1.12799 ++ return "mul %0, %1, %2";
1.12800 ++ case 2:
1.12801 ++ return "mul %0, %1, %2";
1.12802 ++ default:
1.12803 ++ gcc_unreachable();
1.12804 ++ }
1.12805 ++ }
1.12806 ++ [(set_attr "type" "mulww_w,mulww_w,mulwh")
1.12807 ++ (set_attr "length" "2,4,4")
1.12808 ++ (set_attr "cc" "none")])
1.12809 ++
1.12810 ++(define_insn "mulsi3"
1.12811 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1.12812 ++ (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
1.12813 ++ (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
1.12814 ++ "!TARGET_NO_MUL_INSNS"
1.12815 ++ {
1.12816 ++ switch (which_alternative){
1.12817 ++ case 0:
1.12818 ++ return "mul %0, %2";
1.12819 ++ case 1:
1.12820 ++ return "mul %0, %1, %2";
1.12821 ++ case 2:
1.12822 ++ return "mul %0, %1, %2";
1.12823 ++ default:
1.12824 ++ gcc_unreachable();
1.12825 ++ }
1.12826 ++ }
1.12827 ++ [(set_attr "type" "mulww_w,mulww_w,mulwh")
1.12828 ++ (set_attr "length" "2,4,4")
1.12829 ++ (set_attr "cc" "none")])
1.12830 ++
1.12831 ++
1.12832 ++(define_insn "mulhisi3"
1.12833 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.12834 ++ (mult:SI
1.12835 ++ (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
1.12836 ++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
1.12837 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.12838 ++ "mulhh.w %0, %1:b, %2:b"
1.12839 ++ [(set_attr "type" "mulhh")
1.12840 ++ (set_attr "length" "4")
1.12841 ++ (set_attr "cc" "none")])
1.12842 ++
1.12843 ++(define_peephole2
1.12844 ++ [(match_scratch:DI 6 "r")
1.12845 ++ (set (match_operand:SI 0 "register_operand" "")
1.12846 ++ (mult:SI
1.12847 ++ (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
1.12848 ++ (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
1.12849 ++ (set (match_operand:SI 3 "register_operand" "")
1.12850 ++ (ashiftrt:SI (match_dup 0)
1.12851 ++ (const_int 16)))]
1.12852 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP
1.12853 ++ && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
1.12854 ++ [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
1.12855 ++ (set (match_dup 6)
1.12856 ++ (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
1.12857 ++ (sign_extend:DI (match_dup 2)))
1.12858 ++ (const_int 16)))
1.12859 ++ (set (match_dup 3) (match_dup 5))]
1.12860 ++
1.12861 ++ "{
1.12862 ++ operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
1.12863 ++ operands[5] = gen_highpart (SImode, operands[4]);
1.12864 ++ }"
1.12865 ++ )
1.12866 ++
1.12867 ++(define_insn "mulnhisi3"
1.12868 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.12869 ++ (mult:SI
1.12870 ++ (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
1.12871 ++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
1.12872 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.12873 ++ "mulnhh.w %0, %1:b, %2:b"
1.12874 ++ [(set_attr "type" "mulhh")
1.12875 ++ (set_attr "length" "4")
1.12876 ++ (set_attr "cc" "none")])
1.12877 ++
1.12878 ++(define_insn "machisi3"
1.12879 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.12880 ++ (plus:SI (mult:SI
1.12881 ++ (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
1.12882 ++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
1.12883 ++ (match_dup 0)))]
1.12884 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.12885 ++ "machh.w %0, %1:b, %2:b"
1.12886 ++ [(set_attr "type" "machh_w")
1.12887 ++ (set_attr "length" "4")
1.12888 ++ (set_attr "cc" "none")])
1.12889 ++
1.12890 ++
1.12891 ++
1.12892 ++(define_insn "mulsidi3"
1.12893 ++ [(set (match_operand:DI 0 "register_operand" "=r")
1.12894 ++ (mult:DI
1.12895 ++ (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
1.12896 ++ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
1.12897 ++ "!TARGET_NO_MUL_INSNS"
1.12898 ++ "muls.d %0, %1, %2"
1.12899 ++ [(set_attr "type" "mulww_d")
1.12900 ++ (set_attr "length" "4")
1.12901 ++ (set_attr "cc" "none")])
1.12902 ++
1.12903 ++(define_insn "umulsidi3"
1.12904 ++ [(set (match_operand:DI 0 "register_operand" "=r")
1.12905 ++ (mult:DI
1.12906 ++ (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
1.12907 ++ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
1.12908 ++ "!TARGET_NO_MUL_INSNS"
1.12909 ++ "mulu.d %0, %1, %2"
1.12910 ++ [(set_attr "type" "mulww_d")
1.12911 ++ (set_attr "length" "4")
1.12912 ++ (set_attr "cc" "none")])
1.12913 ++
1.12914 ++(define_insn "*mulaccsi3"
1.12915 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.12916 ++ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
1.12917 ++ (match_operand:SI 2 "register_operand" "r"))
1.12918 ++ (match_dup 0)))]
1.12919 ++ "!TARGET_NO_MUL_INSNS"
1.12920 ++ "mac %0, %1, %2"
1.12921 ++ [(set_attr "type" "macww_w")
1.12922 ++ (set_attr "length" "4")
1.12923 ++ (set_attr "cc" "none")])
1.12924 ++
1.12925 ++(define_insn "*mulaccsidi3"
1.12926 ++ [(set (match_operand:DI 0 "register_operand" "+r")
1.12927 ++ (plus:DI (mult:DI
1.12928 ++ (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
1.12929 ++ (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
1.12930 ++ (match_dup 0)))]
1.12931 ++ "!TARGET_NO_MUL_INSNS"
1.12932 ++ "macs.d %0, %1, %2"
1.12933 ++ [(set_attr "type" "macww_d")
1.12934 ++ (set_attr "length" "4")
1.12935 ++ (set_attr "cc" "none")])
1.12936 ++
1.12937 ++(define_insn "*umulaccsidi3"
1.12938 ++ [(set (match_operand:DI 0 "register_operand" "+r")
1.12939 ++ (plus:DI (mult:DI
1.12940 ++ (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
1.12941 ++ (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
1.12942 ++ (match_dup 0)))]
1.12943 ++ "!TARGET_NO_MUL_INSNS"
1.12944 ++ "macu.d %0, %1, %2"
1.12945 ++ [(set_attr "type" "macww_d")
1.12946 ++ (set_attr "length" "4")
1.12947 ++ (set_attr "cc" "none")])
1.12948 ++
1.12949 ++
1.12950 ++
1.12951 ++;; Try to avoid Write-After-Write hazards for mul operations
1.12952 ++;; if it can be done
1.12953 ++(define_peephole2
1.12954 ++ [(set (match_operand:SI 0 "register_operand" "")
1.12955 ++ (mult:SI
1.12956 ++ (sign_extend:SI (match_operand 1 "general_operand" ""))
1.12957 ++ (sign_extend:SI (match_operand 2 "general_operand" ""))))
1.12958 ++ (set (match_dup 0)
1.12959 ++ (match_operator:SI 3 "alu_operator" [(match_dup 0)
1.12960 ++ (match_operand 4 "general_operand" "")]))]
1.12961 ++ "peep2_reg_dead_p(1, operands[2])"
1.12962 ++ [(set (match_dup 5)
1.12963 ++ (mult:SI
1.12964 ++ (sign_extend:SI (match_dup 1))
1.12965 ++ (sign_extend:SI (match_dup 2))))
1.12966 ++ (set (match_dup 0)
1.12967 ++ (match_op_dup 3 [(match_dup 5)
1.12968 ++ (match_dup 4)]))]
1.12969 ++ "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
1.12970 ++ )
1.12971 ++
1.12972 ++
1.12973 ++
1.12974 ++;;=============================================================================
1.12975 ++;; DSP instructions
1.12976 ++;;=============================================================================
1.12977 ++(define_insn "mulsathh_h"
1.12978 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.12979 ++ (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
1.12980 ++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
1.12981 ++ (const_int 15))))]
1.12982 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.12983 ++ "mulsathh.h\t%0, %1:b, %2:b"
1.12984 ++ [(set_attr "length" "4")
1.12985 ++ (set_attr "cc" "none")
1.12986 ++ (set_attr "type" "mulhh")])
1.12987 ++
1.12988 ++(define_insn "mulsatrndhh_h"
1.12989 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.12990 ++ (ss_truncate:HI (ashiftrt:SI
1.12991 ++ (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
1.12992 ++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
1.12993 ++ (const_int 1073741824))
1.12994 ++ (const_int 15))))]
1.12995 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.12996 ++ "mulsatrndhh.h\t%0, %1:b, %2:b"
1.12997 ++ [(set_attr "length" "4")
1.12998 ++ (set_attr "cc" "none")
1.12999 ++ (set_attr "type" "mulhh")])
1.13000 ++
1.13001 ++(define_insn "mulsathh_w"
1.13002 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13003 ++ (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
1.13004 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13005 ++ (const_int 1))))]
1.13006 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13007 ++ "mulsathh.w\t%0, %1:b, %2:b"
1.13008 ++ [(set_attr "length" "4")
1.13009 ++ (set_attr "cc" "none")
1.13010 ++ (set_attr "type" "mulhh")])
1.13011 ++
1.13012 ++(define_insn "mulsatwh_w"
1.13013 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13014 ++ (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
1.13015 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13016 ++ (const_int 15))))]
1.13017 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13018 ++ "mulsatwh.w\t%0, %1, %2:b"
1.13019 ++ [(set_attr "length" "4")
1.13020 ++ (set_attr "cc" "none")
1.13021 ++ (set_attr "type" "mulwh")])
1.13022 ++
1.13023 ++(define_insn "mulsatrndwh_w"
1.13024 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13025 ++ (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
1.13026 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13027 ++ (const_int 1073741824))
1.13028 ++ (const_int 15))))]
1.13029 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13030 ++ "mulsatrndwh.w\t%0, %1, %2:b"
1.13031 ++ [(set_attr "length" "4")
1.13032 ++ (set_attr "cc" "none")
1.13033 ++ (set_attr "type" "mulwh")])
1.13034 ++
1.13035 ++(define_insn "macsathh_w"
1.13036 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.13037 ++ (plus:SI (match_dup 0)
1.13038 ++ (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
1.13039 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13040 ++ (const_int 1)))))]
1.13041 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13042 ++ "macsathh.w\t%0, %1:b, %2:b"
1.13043 ++ [(set_attr "length" "4")
1.13044 ++ (set_attr "cc" "none")
1.13045 ++ (set_attr "type" "mulhh")])
1.13046 ++
1.13047 ++
1.13048 ++(define_insn "mulwh_d"
1.13049 ++ [(set (match_operand:DI 0 "register_operand" "=r")
1.13050 ++ (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
1.13051 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13052 ++ (const_int 16)))]
1.13053 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13054 ++ "mulwh.d\t%0, %1, %2:b"
1.13055 ++ [(set_attr "length" "4")
1.13056 ++ (set_attr "cc" "none")
1.13057 ++ (set_attr "type" "mulwh")])
1.13058 ++
1.13059 ++
1.13060 ++(define_insn "mulnwh_d"
1.13061 ++ [(set (match_operand:DI 0 "register_operand" "=r")
1.13062 ++ (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
1.13063 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13064 ++ (const_int 16)))]
1.13065 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13066 ++ "mulnwh.d\t%0, %1, %2:b"
1.13067 ++ [(set_attr "length" "4")
1.13068 ++ (set_attr "cc" "none")
1.13069 ++ (set_attr "type" "mulwh")])
1.13070 ++
1.13071 ++(define_insn "macwh_d"
1.13072 ++ [(set (match_operand:DI 0 "register_operand" "+r")
1.13073 ++ (plus:DI (match_dup 0)
1.13074 ++ (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
1.13075 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
1.13076 ++ (const_int 16))))]
1.13077 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13078 ++ "macwh.d\t%0, %1, %2:b"
1.13079 ++ [(set_attr "length" "4")
1.13080 ++ (set_attr "cc" "none")
1.13081 ++ (set_attr "type" "mulwh")])
1.13082 ++
1.13083 ++(define_insn "machh_d"
1.13084 ++ [(set (match_operand:DI 0 "register_operand" "+r")
1.13085 ++ (plus:DI (match_dup 0)
1.13086 ++ (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
1.13087 ++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
1.13088 ++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
1.13089 ++ "machh.d\t%0, %1:b, %2:b"
1.13090 ++ [(set_attr "length" "4")
1.13091 ++ (set_attr "cc" "none")
1.13092 ++ (set_attr "type" "mulwh")])
1.13093 ++
1.13094 ++(define_insn "satadd_w"
1.13095 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13096 ++ (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
1.13097 ++ (match_operand:SI 2 "register_operand" "r")))]
1.13098 ++ "TARGET_DSP"
1.13099 ++ "satadd.w\t%0, %1, %2"
1.13100 ++ [(set_attr "length" "4")
1.13101 ++ (set_attr "cc" "none")
1.13102 ++ (set_attr "type" "alu_sat")])
1.13103 ++
1.13104 ++(define_insn "satsub_w"
1.13105 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13106 ++ (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
1.13107 ++ (match_operand:SI 2 "register_operand" "r")))]
1.13108 ++ "TARGET_DSP"
1.13109 ++ "satsub.w\t%0, %1, %2"
1.13110 ++ [(set_attr "length" "4")
1.13111 ++ (set_attr "cc" "none")
1.13112 ++ (set_attr "type" "alu_sat")])
1.13113 ++
1.13114 ++(define_insn "satadd_h"
1.13115 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.13116 ++ (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
1.13117 ++ (match_operand:HI 2 "register_operand" "r")))]
1.13118 ++ "TARGET_DSP"
1.13119 ++ "satadd.h\t%0, %1, %2"
1.13120 ++ [(set_attr "length" "4")
1.13121 ++ (set_attr "cc" "none")
1.13122 ++ (set_attr "type" "alu_sat")])
1.13123 ++
1.13124 ++(define_insn "satsub_h"
1.13125 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.13126 ++ (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
1.13127 ++ (match_operand:HI 2 "register_operand" "r")))]
1.13128 ++ "TARGET_DSP"
1.13129 ++ "satsub.h\t%0, %1, %2"
1.13130 ++ [(set_attr "length" "4")
1.13131 ++ (set_attr "cc" "none")
1.13132 ++ (set_attr "type" "alu_sat")])
1.13133 ++
1.13134 ++
1.13135 ++;;=============================================================================
1.13136 ++;; smin
1.13137 ++;;-----------------------------------------------------------------------------
1.13138 ++;; Set reg0 to the smallest value of reg1 and reg2. It is used for signed
1.13139 ++;; values in the registers.
1.13140 ++;;=============================================================================
1.13141 ++(define_insn "sminsi3"
1.13142 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13143 ++ (smin:SI (match_operand:SI 1 "register_operand" "r")
1.13144 ++ (match_operand:SI 2 "register_operand" "r")))]
1.13145 ++ ""
1.13146 ++ "min %0, %1, %2"
1.13147 ++ [(set_attr "length" "4")
1.13148 ++ (set_attr "cc" "none")])
1.13149 ++
1.13150 ++;;=============================================================================
1.13151 ++;; smax
1.13152 ++;;-----------------------------------------------------------------------------
1.13153 ++;; Set reg0 to the largest value of reg1 and reg2. It is used for signed
1.13154 ++;; values in the registers.
1.13155 ++;;=============================================================================
1.13156 ++(define_insn "smaxsi3"
1.13157 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13158 ++ (smax:SI (match_operand:SI 1 "register_operand" "r")
1.13159 ++ (match_operand:SI 2 "register_operand" "r")))]
1.13160 ++ ""
1.13161 ++ "max %0, %1, %2"
1.13162 ++ [(set_attr "length" "4")
1.13163 ++ (set_attr "cc" "none")])
1.13164 ++
1.13165 ++
1.13166 ++
1.13167 ++;;=============================================================================
1.13168 ++;; Logical operations
1.13169 ++;;-----------------------------------------------------------------------------
1.13170 ++
1.13171 ++
1.13172 ++;; Split up simple DImode logical operations. Simply perform the logical
1.13173 ++;; operation on the upper and lower halves of the registers.
1.13174 ++(define_split
1.13175 ++ [(set (match_operand:DI 0 "register_operand" "")
1.13176 ++ (match_operator:DI 6 "logical_binary_operator"
1.13177 ++ [(match_operand:DI 1 "register_operand" "")
1.13178 ++ (match_operand:DI 2 "register_operand" "")]))]
1.13179 ++ "reload_completed"
1.13180 ++ [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
1.13181 ++ (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
1.13182 ++ "
1.13183 ++ {
1.13184 ++ operands[3] = gen_highpart (SImode, operands[0]);
1.13185 ++ operands[0] = gen_lowpart (SImode, operands[0]);
1.13186 ++ operands[4] = gen_highpart (SImode, operands[1]);
1.13187 ++ operands[1] = gen_lowpart (SImode, operands[1]);
1.13188 ++ operands[5] = gen_highpart (SImode, operands[2]);
1.13189 ++ operands[2] = gen_lowpart (SImode, operands[2]);
1.13190 ++ }"
1.13191 ++)
1.13192 ++
1.13193 ++;;=============================================================================
1.13194 ++;; Logical operations with shifted operand
1.13195 ++;;=============================================================================
1.13196 ++(define_insn "<code>si_lshift"
1.13197 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13198 ++ (logical:SI (match_operator:SI 4 "logical_shift_operator"
1.13199 ++ [(match_operand:SI 2 "register_operand" "r")
1.13200 ++ (match_operand:SI 3 "immediate_operand" "Ku05")])
1.13201 ++ (match_operand:SI 1 "register_operand" "r")))]
1.13202 ++ ""
1.13203 ++ {
1.13204 ++ if ( GET_CODE(operands[4]) == ASHIFT )
1.13205 ++ return "<logical_insn>\t%0, %1, %2 << %3";
1.13206 ++ else
1.13207 ++ return "<logical_insn>\t%0, %1, %2 >> %3";
1.13208 ++ }
1.13209 ++
1.13210 ++ [(set_attr "cc" "set_z")]
1.13211 ++)
1.13212 ++
1.13213 ++
1.13214 ++;;************************************************
1.13215 ++;; Peepholes for detecting logical operations
1.13216 ++;; with shifted operands
1.13217 ++;;************************************************
1.13218 ++
1.13219 ++(define_peephole
1.13220 ++ [(set (match_operand:SI 3 "register_operand" "")
1.13221 ++ (match_operator:SI 5 "logical_shift_operator"
1.13222 ++ [(match_operand:SI 1 "register_operand" "")
1.13223 ++ (match_operand:SI 2 "immediate_operand" "")]))
1.13224 ++ (set (match_operand:SI 0 "register_operand" "")
1.13225 ++ (logical:SI (match_operand:SI 4 "register_operand" "")
1.13226 ++ (match_dup 3)))]
1.13227 ++ "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
1.13228 ++ {
1.13229 ++ if ( GET_CODE(operands[5]) == ASHIFT )
1.13230 ++ return "<logical_insn>\t%0, %4, %1 << %2";
1.13231 ++ else
1.13232 ++ return "<logical_insn>\t%0, %4, %1 >> %2";
1.13233 ++ }
1.13234 ++ [(set_attr "cc" "set_z")]
1.13235 ++ )
1.13236 ++
1.13237 ++(define_peephole
1.13238 ++ [(set (match_operand:SI 3 "register_operand" "")
1.13239 ++ (match_operator:SI 5 "logical_shift_operator"
1.13240 ++ [(match_operand:SI 1 "register_operand" "")
1.13241 ++ (match_operand:SI 2 "immediate_operand" "")]))
1.13242 ++ (set (match_operand:SI 0 "register_operand" "")
1.13243 ++ (logical:SI (match_dup 3)
1.13244 ++ (match_operand:SI 4 "register_operand" "")))]
1.13245 ++ "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
1.13246 ++ {
1.13247 ++ if ( GET_CODE(operands[5]) == ASHIFT )
1.13248 ++ return "<logical_insn>\t%0, %4, %1 << %2";
1.13249 ++ else
1.13250 ++ return "<logical_insn>\t%0, %4, %1 >> %2";
1.13251 ++ }
1.13252 ++ [(set_attr "cc" "set_z")]
1.13253 ++ )
1.13254 ++
1.13255 ++
1.13256 ++(define_peephole2
1.13257 ++ [(set (match_operand:SI 0 "register_operand" "")
1.13258 ++ (match_operator:SI 5 "logical_shift_operator"
1.13259 ++ [(match_operand:SI 1 "register_operand" "")
1.13260 ++ (match_operand:SI 2 "immediate_operand" "")]))
1.13261 ++ (set (match_operand:SI 3 "register_operand" "")
1.13262 ++ (logical:SI (match_operand:SI 4 "register_operand" "")
1.13263 ++ (match_dup 0)))]
1.13264 ++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
1.13265 ++
1.13266 ++ [(set (match_dup 3)
1.13267 ++ (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
1.13268 ++ (match_dup 4)))]
1.13269 ++
1.13270 ++ ""
1.13271 ++)
1.13272 ++
1.13273 ++(define_peephole2
1.13274 ++ [(set (match_operand:SI 0 "register_operand" "")
1.13275 ++ (match_operator:SI 5 "logical_shift_operator"
1.13276 ++ [(match_operand:SI 1 "register_operand" "")
1.13277 ++ (match_operand:SI 2 "immediate_operand" "")]))
1.13278 ++ (set (match_operand:SI 3 "register_operand" "")
1.13279 ++ (logical:SI (match_dup 0)
1.13280 ++ (match_operand:SI 4 "register_operand" "")))]
1.13281 ++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
1.13282 ++
1.13283 ++ [(set (match_dup 3)
1.13284 ++ (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
1.13285 ++ (match_dup 4)))]
1.13286 ++
1.13287 ++ ""
1.13288 ++)
1.13289 ++
1.13290 ++
1.13291 ++;;=============================================================================
1.13292 ++;; and
1.13293 ++;;-----------------------------------------------------------------------------
1.13294 ++;; Store the result after a bitwise logical-and between reg0 and reg2 in reg0.
1.13295 ++;;=============================================================================
1.13296 ++
1.13297 ++(define_insn "andnsi"
1.13298 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.13299 ++ (and:SI (match_dup 0)
1.13300 ++ (not:SI (match_operand:SI 1 "register_operand" "r"))))]
1.13301 ++ ""
1.13302 ++ "andn %0, %1"
1.13303 ++ [(set_attr "cc" "set_z")
1.13304 ++ (set_attr "length" "2")]
1.13305 ++)
1.13306 ++
1.13307 ++
1.13308 ++(define_insn "andsi3"
1.13309 ++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r,r, r, r,r,r,r,r")
1.13310 ++ (and:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,r,0,0, 0, 0,0,0,0,r" )
1.13311 ++ (match_operand:SI 2 "nonmemory_operand" " N,M,N,Ku16,Ks17,J,L,r,i,r")))]
1.13312 ++ ""
1.13313 ++ "@
1.13314 ++ memc\t%0, %z2
1.13315 ++ bfextu\t%0, %1, 0, %z2
1.13316 ++ cbr\t%0, %z2
1.13317 ++ andl\t%0, %2, COH
1.13318 ++ andl\t%0, lo(%2)
1.13319 ++ andh\t%0, hi(%2), COH
1.13320 ++ andh\t%0, hi(%2)
1.13321 ++ and\t%0, %2
1.13322 ++ andh\t%0, hi(%2)\;andl\t%0, lo(%2)
1.13323 ++ and\t%0, %1, %2"
1.13324 ++
1.13325 ++ [(set_attr "length" "4,4,2,4,4,4,4,2,8,4")
1.13326 ++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z")])
1.13327 ++
1.13328 ++(define_insn "anddi3"
1.13329 ++ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
1.13330 ++ (and:DI (match_operand:DI 1 "register_operand" "%0,r")
1.13331 ++ (match_operand:DI 2 "register_operand" "r,r")))]
1.13332 ++ ""
1.13333 ++ "#"
1.13334 ++ [(set_attr "length" "8")
1.13335 ++ (set_attr "cc" "clobber")]
1.13336 ++)
1.13337 ++
1.13338 ++;;=============================================================================
1.13339 ++;; or
1.13340 ++;;-----------------------------------------------------------------------------
1.13341 ++;; Store the result after a bitwise inclusive-or between reg0 and reg2 in reg0.
1.13342 ++;;=============================================================================
1.13343 ++
1.13344 ++(define_insn "iorsi3"
1.13345 ++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r, r,r,r,r")
1.13346 ++ (ior:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0,0, 0,0,0,r" )
1.13347 ++ (match_operand:SI 2 "nonmemory_operand" " O,O,Ku16,J,r,i,r")))]
1.13348 ++ ""
1.13349 ++ "@
1.13350 ++ mems\t%0, %p2
1.13351 ++ sbr\t%0, %p2
1.13352 ++ orl\t%0, %2
1.13353 ++ orh\t%0, hi(%2)
1.13354 ++ or\t%0, %2
1.13355 ++ orh\t%0, hi(%2)\;orl\t%0, lo(%2)
1.13356 ++ or\t%0, %1, %2"
1.13357 ++
1.13358 ++ [(set_attr "length" "4,2,4,4,2,8,4")
1.13359 ++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z")])
1.13360 ++
1.13361 ++
1.13362 ++(define_insn "iordi3"
1.13363 ++ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
1.13364 ++ (ior:DI (match_operand:DI 1 "register_operand" "%0,r")
1.13365 ++ (match_operand:DI 2 "register_operand" "r,r")))]
1.13366 ++ ""
1.13367 ++ "#"
1.13368 ++ [(set_attr "length" "8")
1.13369 ++ (set_attr "cc" "clobber")]
1.13370 ++)
1.13371 ++
1.13372 ++;;=============================================================================
1.13373 ++;; xor bytes
1.13374 ++;;-----------------------------------------------------------------------------
1.13375 ++;; Store the result after a bitwise exclusive-or between reg0 and reg2 in reg0.
1.13376 ++;;=============================================================================
1.13377 ++
1.13378 ++(define_insn "xorsi3"
1.13379 ++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r, r,r,r,r")
1.13380 ++ (xor:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0, 0,0,0,r" )
1.13381 ++ (match_operand:SI 2 "nonmemory_operand" " O,Ku16,J,r,i,r")))]
1.13382 ++ ""
1.13383 ++ "@
1.13384 ++ memt\t%0, %p2
1.13385 ++ eorl\t%0, %2
1.13386 ++ eorh\t%0, hi(%2)
1.13387 ++ eor\t%0, %2
1.13388 ++ eorh\t%0, hi(%2)\;eorl\t%0, lo(%2)
1.13389 ++ eor\t%0, %1, %2"
1.13390 ++
1.13391 ++ [(set_attr "length" "4,4,4,2,8,4")
1.13392 ++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z")])
1.13393 ++
1.13394 ++
1.13395 ++(define_insn "xordi3"
1.13396 ++ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
1.13397 ++ (xor:DI (match_operand:DI 1 "register_operand" "%0,r")
1.13398 ++ (match_operand:DI 2 "register_operand" "r,r")))]
1.13399 ++ ""
1.13400 ++ "#"
1.13401 ++ [(set_attr "length" "8")
1.13402 ++ (set_attr "cc" "clobber")]
1.13403 ++)
1.13404 ++
1.13405 ++;;=============================================================================
1.13406 ++;; Three operand predicable insns
1.13407 ++;;=============================================================================
1.13408 ++
1.13409 ++(define_insn "<predicable_insn3><mode>_predicable"
1.13410 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.13411 ++ (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
1.13412 ++ (match_operand:INTM 2 "register_operand" "r")))]
1.13413 ++ "TARGET_V2_INSNS"
1.13414 ++ "<predicable_insn3>%?\t%0, %1, %2"
1.13415 ++ [(set_attr "length" "4")
1.13416 ++ (set_attr "cc" "cmp_cond_insn")
1.13417 ++ (set_attr "predicable" "yes")]
1.13418 ++)
1.13419 ++
1.13420 ++(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable"
1.13421 ++ [(parallel
1.13422 ++ [(set (match_operand:INTM 0 "register_operand" "=r")
1.13423 ++ (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
1.13424 ++ (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21")))
1.13425 ++ (clobber (match_operand:INTM 3 "register_operand" "=&r"))])]
1.13426 ++ "TARGET_V2_INSNS"
1.13427 ++ {
1.13428 ++ if ( current_insn_predicate != NULL_RTX )
1.13429 ++ {
1.13430 ++ if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
1.13431 ++ return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
1.13432 ++ else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
1.13433 ++ return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
1.13434 ++ else
1.13435 ++ return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
1.13436 ++ }
1.13437 ++ else
1.13438 ++ {
1.13439 ++ if ( !avr32_cond_imm_clobber_splittable (insn, operands) )
1.13440 ++ {
1.13441 ++ if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
1.13442 ++ return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
1.13443 ++ else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
1.13444 ++ return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
1.13445 ++ else
1.13446 ++ return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
1.13447 ++ }
1.13448 ++ return "#";
1.13449 ++ }
1.13450 ++
1.13451 ++ }
1.13452 ++ ;; If we find out that we could not actually do if-conversion on the block
1.13453 ++ ;; containing this insn we convert it back to normal immediate format
1.13454 ++  ;; to avoid outputting a redundant move insn
1.13455 ++ ;; Do not split until after we have checked if we can make the insn
1.13456 ++ ;; conditional.
1.13457 ++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
1.13458 ++ && cfun->machine->ifcvt_after_reload
1.13459 ++ && avr32_cond_imm_clobber_splittable (insn, operands))"
1.13460 ++ [(set (match_dup 0)
1.13461 ++ (predicable_op3:INTM (match_dup 1)
1.13462 ++ (match_dup 2)))]
1.13463 ++ ""
1.13464 ++ [(set_attr "length" "8")
1.13465 ++ (set_attr "cc" "cmp_cond_insn")
1.13466 ++ (set_attr "predicable" "yes")]
1.13467 ++ )
1.13468 ++
1.13469 ++
1.13470 ++;;=============================================================================
1.13471 ++;; Zero extend predicable insns
1.13472 ++;;=============================================================================
1.13473 ++(define_insn_and_split "zero_extendhisi_clobber_predicable"
1.13474 ++ [(parallel
1.13475 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13476 ++ (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))
1.13477 ++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
1.13478 ++ "TARGET_V2_INSNS"
1.13479 ++ {
1.13480 ++ if ( current_insn_predicate != NULL_RTX )
1.13481 ++ {
1.13482 ++ return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2";
1.13483 ++ }
1.13484 ++ else
1.13485 ++ {
1.13486 ++ return "#";
1.13487 ++ }
1.13488 ++
1.13489 ++ }
1.13490 ++ ;; If we find out that we could not actually do if-conversion on the block
1.13491 ++ ;; containing this insn we convert it back to normal immediate format
1.13492 ++;; to avoid outputting a redundant move insn
1.13493 ++ ;; Do not split until after we have checked if we can make the insn
1.13494 ++ ;; conditional.
1.13495 ++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
1.13496 ++ && cfun->machine->ifcvt_after_reload)"
1.13497 ++ [(set (match_dup 0)
1.13498 ++ (zero_extend:SI (match_dup 1)))]
1.13499 ++ ""
1.13500 ++ [(set_attr "length" "8")
1.13501 ++ (set_attr "cc" "cmp_cond_insn")
1.13502 ++ (set_attr "predicable" "yes")]
1.13503 ++ )
1.13504 ++
1.13505 ++(define_insn_and_split "zero_extendqisi_clobber_predicable"
1.13506 ++ [(parallel
1.13507 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13508 ++ (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))
1.13509 ++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
1.13510 ++ "TARGET_V2_INSNS"
1.13511 ++ {
1.13512 ++ if ( current_insn_predicate != NULL_RTX )
1.13513 ++ {
1.13514 ++ return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
1.13515 ++ }
1.13516 ++ else
1.13517 ++ {
1.13518 ++ return "#";
1.13519 ++ }
1.13520 ++
1.13521 ++ }
1.13522 ++ ;; If we find out that we could not actually do if-conversion on the block
1.13523 ++ ;; containing this insn we convert it back to normal immediate format
1.13524 ++;; to avoid outputting a redundant move insn
1.13525 ++ ;; Do not split until after we have checked if we can make the insn
1.13526 ++ ;; conditional.
1.13527 ++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
1.13528 ++ && cfun->machine->ifcvt_after_reload)"
1.13529 ++ [(set (match_dup 0)
1.13530 ++ (zero_extend:SI (match_dup 1)))]
1.13531 ++ ""
1.13532 ++ [(set_attr "length" "8")
1.13533 ++ (set_attr "cc" "cmp_cond_insn")
1.13534 ++ (set_attr "predicable" "yes")]
1.13535 ++ )
1.13536 ++
1.13537 ++(define_insn_and_split "zero_extendqihi_clobber_predicable"
1.13538 ++ [(parallel
1.13539 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.13540 ++ (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))
1.13541 ++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
1.13542 ++ "TARGET_V2_INSNS"
1.13543 ++ {
1.13544 ++ if ( current_insn_predicate != NULL_RTX )
1.13545 ++ {
1.13546 ++ return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
1.13547 ++ }
1.13548 ++ else
1.13549 ++ {
1.13550 ++ return "#";
1.13551 ++ }
1.13552 ++
1.13553 ++ }
1.13554 ++ ;; If we find out that we could not actually do if-conversion on the block
1.13555 ++ ;; containing this insn we convert it back to normal immediate format
1.13556 ++;; to avoid outputting a redundant move insn
1.13557 ++ ;; Do not split until after we have checked if we can make the insn
1.13558 ++ ;; conditional.
1.13559 ++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
1.13560 ++ && cfun->machine->ifcvt_after_reload)"
1.13561 ++ [(set (match_dup 0)
1.13562 ++ (zero_extend:HI (match_dup 1)))]
1.13563 ++ ""
1.13564 ++ [(set_attr "length" "8")
1.13565 ++ (set_attr "cc" "cmp_cond_insn")
1.13566 ++ (set_attr "predicable" "yes")]
1.13567 ++ )
1.13568 ++;;=============================================================================
1.13569 ++;; divmod
1.13570 ++;;-----------------------------------------------------------------------------
1.13571 ++;; Signed division that produces both a quotient and a remainder.
1.13572 ++;;=============================================================================
1.13573 ++(define_expand "divmodsi4"
1.13574 ++ [(parallel [
1.13575 ++ (parallel [
1.13576 ++ (set (match_operand:SI 0 "register_operand" "=r")
1.13577 ++ (div:SI (match_operand:SI 1 "register_operand" "r")
1.13578 ++ (match_operand:SI 2 "register_operand" "r")))
1.13579 ++ (set (match_operand:SI 3 "register_operand" "=r")
1.13580 ++ (mod:SI (match_dup 1)
1.13581 ++ (match_dup 2)))])
1.13582 ++ (use (match_dup 4))])]
1.13583 ++ ""
1.13584 ++ {
1.13585 ++ if (! no_new_pseudos) {
1.13586 ++ operands[4] = gen_reg_rtx (DImode);
1.13587 ++
1.13588 ++ emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
1.13589 ++ emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
1.13590 ++ emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
1.13591 ++
1.13592 ++ DONE;
1.13593 ++ } else {
1.13594 ++ FAIL;
1.13595 ++ }
1.13596 ++
1.13597 ++ })
1.13598 ++
1.13599 ++
1.13600 ++(define_insn "divmodsi4_internal"
1.13601 ++ [(set (match_operand:DI 0 "register_operand" "=r")
1.13602 ++ (unspec:DI [(match_operand:SI 1 "register_operand" "r")
1.13603 ++ (match_operand:SI 2 "register_operand" "r")]
1.13604 ++ UNSPEC_DIVMODSI4_INTERNAL))]
1.13605 ++ ""
1.13606 ++ "divs %0, %1, %2"
1.13607 ++ [(set_attr "type" "div")
1.13608 ++ (set_attr "cc" "none")])
1.13609 ++
1.13610 ++
1.13611 ++;;=============================================================================
1.13612 ++;; udivmod
1.13613 ++;;-----------------------------------------------------------------------------
1.13614 ++;; Unsigned division that produces both a quotient and a remainder.
1.13615 ++;;=============================================================================
1.13616 ++(define_expand "udivmodsi4"
1.13617 ++ [(parallel [
1.13618 ++ (parallel [
1.13619 ++ (set (match_operand:SI 0 "register_operand" "=r")
1.13620 ++ (udiv:SI (match_operand:SI 1 "register_operand" "r")
1.13621 ++ (match_operand:SI 2 "register_operand" "r")))
1.13622 ++ (set (match_operand:SI 3 "register_operand" "=r")
1.13623 ++ (umod:SI (match_dup 1)
1.13624 ++ (match_dup 2)))])
1.13625 ++ (use (match_dup 4))])]
1.13626 ++ ""
1.13627 ++ {
1.13628 ++ if (! no_new_pseudos) {
1.13629 ++ operands[4] = gen_reg_rtx (DImode);
1.13630 ++
1.13631 ++ emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
1.13632 ++ emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
1.13633 ++ emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
1.13634 ++
1.13635 ++ DONE;
1.13636 ++ } else {
1.13637 ++ FAIL;
1.13638 ++ }
1.13639 ++ })
1.13640 ++
1.13641 ++(define_insn "udivmodsi4_internal"
1.13642 ++ [(set (match_operand:DI 0 "register_operand" "=r")
1.13643 ++ (unspec:DI [(match_operand:SI 1 "register_operand" "r")
1.13644 ++ (match_operand:SI 2 "register_operand" "r")]
1.13645 ++ UNSPEC_UDIVMODSI4_INTERNAL))]
1.13646 ++ ""
1.13647 ++ "divu %0, %1, %2"
1.13648 ++ [(set_attr "type" "div")
1.13649 ++ (set_attr "cc" "none")])
1.13650 ++
1.13651 ++
1.13652 ++;;=============================================================================
1.13653 ++;; Arithmetic-shift left
1.13654 ++;;-----------------------------------------------------------------------------
1.13655 ++;; Arithmetic-shift reg0 left by reg2 or immediate value.
1.13656 ++;;=============================================================================
1.13657 ++
1.13658 ++(define_insn "ashlsi3"
1.13659 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1.13660 ++ (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
1.13661 ++ (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
1.13662 ++ ""
1.13663 ++ "@
1.13664 ++ lsl %0, %1, %2
1.13665 ++ lsl %0, %2
1.13666 ++ lsl %0, %1, %2"
1.13667 ++ [(set_attr "length" "4,2,4")
1.13668 ++ (set_attr "cc" "set_ncz")])
1.13669 ++
1.13670 ++;;=============================================================================
1.13671 ++;; Arithmetic-shift right
1.13672 ++;;-----------------------------------------------------------------------------
1.13673 ++;; Arithmetic-shift reg0 right by an immediate value.
1.13674 ++;;=============================================================================
1.13675 ++
1.13676 ++(define_insn "ashrsi3"
1.13677 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1.13678 ++ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
1.13679 ++ (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
1.13680 ++ ""
1.13681 ++ "@
1.13682 ++ asr %0, %1, %2
1.13683 ++ asr %0, %2
1.13684 ++ asr %0, %1, %2"
1.13685 ++ [(set_attr "length" "4,2,4")
1.13686 ++ (set_attr "cc" "set_ncz")])
1.13687 ++
1.13688 ++;;=============================================================================
1.13689 ++;; Logical shift right
1.13690 ++;;-----------------------------------------------------------------------------
1.13691 ++;; Logical shift reg0 right by an immediate value.
1.13692 ++;;=============================================================================
1.13693 ++
1.13694 ++(define_insn "lshrsi3"
1.13695 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1.13696 ++ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
1.13697 ++ (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
1.13698 ++ ""
1.13699 ++ "@
1.13700 ++ lsr %0, %1, %2
1.13701 ++ lsr %0, %2
1.13702 ++ lsr %0, %1, %2"
1.13703 ++ [(set_attr "length" "4,2,4")
1.13704 ++ (set_attr "cc" "set_ncz")])
1.13705 ++
1.13706 ++
1.13707 ++;;=============================================================================
1.13708 ++;; neg
1.13709 ++;;-----------------------------------------------------------------------------
1.13710 ++;; Negate operand 1 and store the result in operand 0.
1.13711 ++;;=============================================================================
1.13712 ++(define_insn "negsi2"
1.13713 ++ [(set (match_operand:SI 0 "register_operand" "=r,r")
1.13714 ++ (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
1.13715 ++ ""
1.13716 ++ "@
1.13717 ++ neg\t%0
1.13718 ++ rsub\t%0, %1, 0"
1.13719 ++ [(set_attr "length" "2,4")
1.13720 ++ (set_attr "cc" "set_vncz")])
1.13721 ++
1.13722 ++(define_insn "negsi2_predicable"
1.13723 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.13724 ++ (neg:SI (match_dup 0)))]
1.13725 ++ "TARGET_V2_INSNS"
1.13726 ++ "rsub%?\t%0, 0"
1.13727 ++ [(set_attr "length" "4")
1.13728 ++ (set_attr "cc" "cmp_cond_insn")
1.13729 ++ (set_attr "predicable" "yes")])
1.13730 ++
1.13731 ++;;=============================================================================
1.13732 ++;; abs
1.13733 ++;;-----------------------------------------------------------------------------
1.13734 ++;; Store the absolute value of operand 1 into operand 0.
1.13735 ++;;=============================================================================
1.13736 ++(define_insn "abssi2"
1.13737 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.13738 ++ (abs:SI (match_operand:SI 1 "register_operand" "0")))]
1.13739 ++ ""
1.13740 ++ "abs\t%0"
1.13741 ++ [(set_attr "length" "2")
1.13742 ++ (set_attr "cc" "set_z")])
1.13743 ++
1.13744 ++
1.13745 ++;;=============================================================================
1.13746 ++;; one_cmpl
1.13747 ++;;-----------------------------------------------------------------------------
1.13748 ++;; Store the bitwise-complement of operand 1 into operand 0.
1.13749 ++;;=============================================================================
1.13750 ++
1.13751 ++(define_insn "one_cmplsi2"
1.13752 ++ [(set (match_operand:SI 0 "register_operand" "=r,r")
1.13753 ++ (not:SI (match_operand:SI 1 "register_operand" "0,r")))]
1.13754 ++ ""
1.13755 ++ "@
1.13756 ++ com\t%0
1.13757 ++ rsub\t%0, %1, -1"
1.13758 ++ [(set_attr "length" "2,4")
1.13759 ++ (set_attr "cc" "set_z")])
1.13760 ++
1.13761 ++
1.13762 ++(define_insn "one_cmplsi2_predicable"
1.13763 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.13764 ++ (not:SI (match_dup 0)))]
1.13765 ++ "TARGET_V2_INSNS"
1.13766 ++ "rsub%?\t%0, -1"
1.13767 ++ [(set_attr "length" "4")
1.13768 ++ (set_attr "cc" "cmp_cond_insn")
1.13769 ++ (set_attr "predicable" "yes")])
1.13770 ++
1.13771 ++
1.13772 ++;;=============================================================================
1.13773 ++;; Bit load
1.13774 ++;;-----------------------------------------------------------------------------
1.13775 ++;; Load a bit into Z and C flags
1.13776 ++;;=============================================================================
1.13777 ++(define_insn "bldsi"
1.13778 ++ [(set (cc0)
1.13779 ++ (and:SI (match_operand:SI 0 "register_operand" "r")
1.13780 ++ (match_operand:SI 1 "one_bit_set_operand" "i")))]
1.13781 ++ ""
1.13782 ++ "bld\t%0, %p1"
1.13783 ++ [(set_attr "length" "4")
1.13784 ++ (set_attr "cc" "bld")]
1.13785 ++ )
1.13786 ++
1.13787 ++
1.13788 ++;;=============================================================================
1.13789 ++;; Compare
1.13790 ++;;-----------------------------------------------------------------------------
1.13791 ++;; Compare reg0 with reg1 or an immediate value.
1.13792 ++;;=============================================================================
1.13793 ++
1.13794 ++(define_expand "cmp<mode>"
1.13795 ++ [(set (cc0)
1.13796 ++ (compare:CMP
1.13797 ++ (match_operand:CMP 0 "register_operand" "")
1.13798 ++ (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))]
1.13799 ++ ""
1.13800 ++ "{
1.13801 ++ avr32_compare_op0 = operands[0];
1.13802 ++ avr32_compare_op1 = operands[1];
1.13803 ++ }"
1.13804 ++)
1.13805 ++
1.13806 ++(define_insn "cmp<mode>_internal"
1.13807 ++ [(set (cc0)
1.13808 ++ (compare:CMP
1.13809 ++ (match_operand:CMP 0 "register_operand" "r")
1.13810 ++ (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))]
1.13811 ++ ""
1.13812 ++ {
1.13813 ++ set_next_insn_cond(insn,
1.13814 ++ avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1]));
1.13815 ++ return "";
1.13816 ++ }
1.13817 ++ [(set_attr "length" "4")
1.13818 ++ (set_attr "cc" "compare")])
1.13819 ++
1.13820 ++
1.13821 ++;;=============================================================================
1.13822 ++;; Test if zero
1.13823 ++;;-----------------------------------------------------------------------------
1.13824 ++;; Compare reg against zero and set the condition codes.
1.13825 ++;;=============================================================================
1.13826 ++
1.13827 ++
1.13828 ++(define_expand "tstsi"
1.13829 ++ [(set (cc0)
1.13830 ++ (match_operand:SI 0 "register_operand" ""))]
1.13831 ++ ""
1.13832 ++ {
1.13833 ++ avr32_compare_op0 = operands[0];
1.13834 ++ avr32_compare_op1 = const0_rtx;
1.13835 ++ }
1.13836 ++)
1.13837 ++
1.13838 ++(define_insn "tstsi_internal"
1.13839 ++ [(set (cc0)
1.13840 ++ (match_operand:SI 0 "register_operand" "r"))]
1.13841 ++ ""
1.13842 ++ {
1.13843 ++ set_next_insn_cond(insn,
1.13844 ++ avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
1.13845 ++
1.13846 ++ return "";
1.13847 ++ }
1.13848 ++ [(set_attr "length" "2")
1.13849 ++ (set_attr "cc" "compare")])
1.13850 ++
1.13851 ++
1.13852 ++(define_expand "tstdi"
1.13853 ++ [(set (cc0)
1.13854 ++ (match_operand:DI 0 "register_operand" ""))]
1.13855 ++ ""
1.13856 ++ {
1.13857 ++ avr32_compare_op0 = operands[0];
1.13858 ++ avr32_compare_op1 = const0_rtx;
1.13859 ++ }
1.13860 ++)
1.13861 ++
1.13862 ++(define_insn "tstdi_internal"
1.13863 ++ [(set (cc0)
1.13864 ++ (match_operand:DI 0 "register_operand" "r"))]
1.13865 ++ ""
1.13866 ++ {
1.13867 ++ set_next_insn_cond(insn,
1.13868 ++ avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
1.13869 ++ return "";
1.13870 ++ }
1.13871 ++ [(set_attr "length" "4")
1.13872 ++ (set_attr "type" "alu2")
1.13873 ++ (set_attr "cc" "compare")])
1.13874 ++
1.13875 ++
1.13876 ++
1.13877 ++;;=============================================================================
1.13878 ++;; Convert operands
1.13879 ++;;-----------------------------------------------------------------------------
1.13880 ++;;
1.13881 ++;;=============================================================================
1.13882 ++(define_insn "truncdisi2"
1.13883 ++ [(set (match_operand:SI 0 "general_operand" "")
1.13884 ++ (truncate:SI (match_operand:DI 1 "general_operand" "")))]
1.13885 ++ ""
1.13886 ++ "truncdisi2")
1.13887 ++
1.13888 ++;;=============================================================================
1.13889 ++;; Extend
1.13890 ++;;-----------------------------------------------------------------------------
1.13891 ++;;
1.13892 ++;;=============================================================================
1.13893 ++
1.13894 ++
1.13895 ++(define_insn "extendhisi2"
1.13896 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1.13897 ++ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
1.13898 ++ ""
1.13899 ++ {
1.13900 ++ switch ( which_alternative ){
1.13901 ++ case 0:
1.13902 ++ return "casts.h\t%0";
1.13903 ++ case 1:
1.13904 ++ return "bfexts\t%0, %1, 0, 16";
1.13905 ++ case 2:
1.13906 ++ case 3:
1.13907 ++ return "ld.sh\t%0, %1";
1.13908 ++ default:
1.13909 ++ abort();
1.13910 ++ }
1.13911 ++ }
1.13912 ++ [(set_attr "length" "2,4,2,4")
1.13913 ++ (set_attr "cc" "set_ncz,set_ncz,none,none")
1.13914 ++ (set_attr "type" "alu,alu,load_rm,load_rm")])
1.13915 ++
1.13916 ++(define_insn "extendqisi2"
1.13917 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1.13918 ++ (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
1.13919 ++ ""
1.13920 ++ {
1.13921 ++ switch ( which_alternative ){
1.13922 ++ case 0:
1.13923 ++ return "casts.b\t%0";
1.13924 ++ case 1:
1.13925 ++ return "bfexts\t%0, %1, 0, 8";
1.13926 ++ case 2:
1.13927 ++ case 3:
1.13928 ++ return "ld.sb\t%0, %1";
1.13929 ++ default:
1.13930 ++ abort();
1.13931 ++ }
1.13932 ++ }
1.13933 ++ [(set_attr "length" "2,4,2,4")
1.13934 ++ (set_attr "cc" "set_ncz,set_ncz,none,none")
1.13935 ++ (set_attr "type" "alu,alu,load_rm,load_rm")])
1.13936 ++
1.13937 ++(define_insn "extendqihi2"
1.13938 ++ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
1.13939 ++ (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
1.13940 ++ ""
1.13941 ++ {
1.13942 ++ switch ( which_alternative ){
1.13943 ++ case 0:
1.13944 ++ return "casts.b\t%0";
1.13945 ++ case 1:
1.13946 ++ return "bfexts\t%0, %1, 0, 8";
1.13947 ++ case 2:
1.13948 ++ case 3:
1.13949 ++ return "ld.sb\t%0, %1";
1.13950 ++ default:
1.13951 ++ abort();
1.13952 ++ }
1.13953 ++ }
1.13954 ++ [(set_attr "length" "2,4,2,4")
1.13955 ++ (set_attr "cc" "set_ncz,set_ncz,none,none")
1.13956 ++ (set_attr "type" "alu,alu,load_rm,load_rm")])
1.13957 ++
1.13958 ++
1.13959 ++;;=============================================================================
1.13960 ++;; Zero-extend
1.13961 ++;;-----------------------------------------------------------------------------
1.13962 ++;;
1.13963 ++;;=============================================================================
1.13964 ++
1.13965 ++(define_insn "zero_extendhisi2"
1.13966 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1.13967 ++ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
1.13968 ++ ""
1.13969 ++ {
1.13970 ++ switch ( which_alternative ){
1.13971 ++ case 0:
1.13972 ++ return "castu.h\t%0";
1.13973 ++ case 1:
1.13974 ++ return "bfextu\t%0, %1, 0, 16";
1.13975 ++ case 2:
1.13976 ++ case 3:
1.13977 ++ return "ld.uh\t%0, %1";
1.13978 ++ default:
1.13979 ++ abort();
1.13980 ++ }
1.13981 ++ }
1.13982 ++
1.13983 ++ [(set_attr "length" "2,4,2,4")
1.13984 ++ (set_attr "cc" "set_ncz,set_ncz,none,none")
1.13985 ++ (set_attr "type" "alu,alu,load_rm,load_rm")])
1.13986 ++
1.13987 ++(define_insn "zero_extendqisi2"
1.13988 ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1.13989 ++ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
1.13990 ++ ""
1.13991 ++ {
1.13992 ++ switch ( which_alternative ){
1.13993 ++ case 0:
1.13994 ++ return "castu.b\t%0";
1.13995 ++ case 1:
1.13996 ++ return "bfextu\t%0, %1, 0, 8";
1.13997 ++ case 2:
1.13998 ++ case 3:
1.13999 ++ return "ld.ub\t%0, %1";
1.14000 ++ default:
1.14001 ++ abort();
1.14002 ++ }
1.14003 ++ }
1.14004 ++ [(set_attr "length" "2,4,2,4")
1.14005 ++ (set_attr "cc" "set_ncz, set_ncz, none, none")
1.14006 ++ (set_attr "type" "alu, alu, load_rm, load_rm")])
1.14007 ++
1.14008 ++(define_insn "zero_extendqihi2"
1.14009 ++ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
1.14010 ++ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
1.14011 ++ ""
1.14012 ++ {
1.14013 ++ switch ( which_alternative ){
1.14014 ++ case 0:
1.14015 ++ return "castu.b\t%0";
1.14016 ++ case 1:
1.14017 ++ return "bfextu\t%0, %1, 0, 8";
1.14018 ++ case 2:
1.14019 ++ case 3:
1.14020 ++ return "ld.ub\t%0, %1";
1.14021 ++ default:
1.14022 ++ abort();
1.14023 ++ }
1.14024 ++ }
1.14025 ++ [(set_attr "length" "2,4,2,4")
1.14026 ++ (set_attr "cc" "set_ncz, set_ncz, none, none")
1.14027 ++ (set_attr "type" "alu, alu, load_rm, load_rm")])
1.14028 ++
1.14029 ++
1.14030 ++;;=============================================================================
1.14031 ++;; Conditional load and extend insns
1.14032 ++;;=============================================================================
1.14033 ++(define_insn "ldsi<mode>_predicable_se"
1.14034 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.14035 ++ (sign_extend:SI
1.14036 ++ (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
1.14037 ++ "TARGET_V2_INSNS"
1.14038 ++ "ld<INTM:load_postfix_s>%?\t%0, %1"
1.14039 ++ [(set_attr "length" "4")
1.14040 ++ (set_attr "cc" "cmp_cond_insn")
1.14041 ++ (set_attr "type" "load")
1.14042 ++ (set_attr "predicable" "yes")]
1.14043 ++)
1.14044 ++
1.14045 ++(define_insn "ldsi<mode>_predicable_ze"
1.14046 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.14047 ++ (zero_extend:SI
1.14048 ++ (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
1.14049 ++ "TARGET_V2_INSNS"
1.14050 ++ "ld<INTM:load_postfix_u>%?\t%0, %1"
1.14051 ++ [(set_attr "length" "4")
1.14052 ++ (set_attr "cc" "cmp_cond_insn")
1.14053 ++ (set_attr "type" "load")
1.14054 ++ (set_attr "predicable" "yes")]
1.14055 ++)
1.14056 ++
1.14057 ++(define_insn "ldhi_predicable_ze"
1.14058 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.14059 ++ (zero_extend:HI
1.14060 ++ (match_operand:QI 1 "memory_operand" "RKs10")))]
1.14061 ++ "TARGET_V2_INSNS"
1.14062 ++ "ld.ub%?\t%0, %1"
1.14063 ++ [(set_attr "length" "4")
1.14064 ++ (set_attr "cc" "cmp_cond_insn")
1.14065 ++ (set_attr "type" "load")
1.14066 ++ (set_attr "predicable" "yes")]
1.14067 ++)
1.14068 ++
1.14069 ++(define_insn "ldhi_predicable_se"
1.14070 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.14071 ++ (sign_extend:HI
1.14072 ++ (match_operand:QI 1 "memory_operand" "RKs10")))]
1.14073 ++ "TARGET_V2_INSNS"
1.14074 ++ "ld.sb%?\t%0, %1"
1.14075 ++ [(set_attr "length" "4")
1.14076 ++ (set_attr "cc" "cmp_cond_insn")
1.14077 ++ (set_attr "type" "load")
1.14078 ++ (set_attr "predicable" "yes")]
1.14079 ++)
1.14080 ++
1.14081 ++;;=============================================================================
1.14082 ++;; Conditional set register
1.14083 ++;; sr{cond4} rd
1.14084 ++;;-----------------------------------------------------------------------------
1.14085 ++
1.14086 ++;;Because of the same issue as with conditional moves and adds we must
1.14087 ++;;not separate the compare instruction from the scc instruction as
1.14088 ++;;they might be scheduled "badly".
1.14089 ++
1.14090 ++(define_insn "s<code>"
1.14091 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.14092 ++ (any_cond:SI (cc0)
1.14093 ++ (const_int 0)))]
1.14094 ++ ""
1.14095 ++ "sr<cond>\t%0"
1.14096 ++ [(set_attr "length" "2")
1.14097 ++ (set_attr "cc" "none")])
1.14098 ++
1.14099 ++(define_insn "smi"
1.14100 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.14101 ++ (unspec:SI [(cc0)
1.14102 ++ (const_int 0)] UNSPEC_COND_MI))]
1.14103 ++ ""
1.14104 ++ "srmi\t%0"
1.14105 ++ [(set_attr "length" "2")
1.14106 ++ (set_attr "cc" "none")])
1.14107 ++
1.14108 ++(define_insn "spl"
1.14109 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.14110 ++ (unspec:SI [(cc0)
1.14111 ++ (const_int 0)] UNSPEC_COND_PL))]
1.14112 ++ ""
1.14113 ++ "srpl\t%0"
1.14114 ++ [(set_attr "length" "2")
1.14115 ++ (set_attr "cc" "none")])
1.14116 ++
1.14117 ++
1.14118 ++;;=============================================================================
1.14119 ++;; Conditional branch
1.14120 ++;;-----------------------------------------------------------------------------
1.14121 ++;; Branch to label if the specified condition codes are set.
1.14122 ++;;=============================================================================
1.14123 ++; branch if negative
1.14124 ++(define_insn "bmi"
1.14125 ++ [(set (pc)
1.14126 ++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
1.14127 ++ (label_ref (match_operand 0 "" ""))
1.14128 ++ (pc)))]
1.14129 ++ ""
1.14130 ++ "brmi %0"
1.14131 ++ [(set_attr "type" "branch")
1.14132 ++ (set (attr "length")
1.14133 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
1.14134 ++ (le (minus (pc) (match_dup 0)) (const_int 256)))
1.14135 ++ (const_int 2)] ; use compact branch
1.14136 ++ (const_int 4))) ; use extended branch
1.14137 ++ (set_attr "cc" "none")])
1.14138 ++
1.14139 ++(define_insn "*bmi-reverse"
1.14140 ++ [(set (pc)
1.14141 ++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
1.14142 ++ (pc)
1.14143 ++ (label_ref (match_operand 0 "" ""))))]
1.14144 ++ ""
1.14145 ++ "brpl %0"
1.14146 ++ [(set_attr "type" "branch")
1.14147 ++ (set (attr "length")
1.14148 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
1.14149 ++ (le (minus (pc) (match_dup 0)) (const_int 256)))
1.14150 ++ (const_int 2)] ; use compact branch
1.14151 ++ (const_int 4))) ; use extended branch
1.14152 ++ (set_attr "cc" "none")])
1.14153 ++
1.14154 ++; branch if positive
1.14155 ++(define_insn "bpl"
1.14156 ++ [(set (pc)
1.14157 ++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
1.14158 ++ (label_ref (match_operand 0 "" ""))
1.14159 ++ (pc)))]
1.14160 ++ ""
1.14161 ++ "brpl %0"
1.14162 ++ [(set_attr "type" "branch")
1.14163 ++ (set (attr "length")
1.14164 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
1.14165 ++ (le (minus (pc) (match_dup 0)) (const_int 256)))
1.14166 ++ (const_int 2)] ; use compact branch
1.14167 ++ (const_int 4))) ; use extended branch
1.14168 ++ (set_attr "cc" "none")])
1.14169 ++
1.14170 ++(define_insn "*bpl-reverse"
1.14171 ++ [(set (pc)
1.14172 ++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
1.14173 ++ (pc)
1.14174 ++ (label_ref (match_operand 0 "" ""))))]
1.14175 ++ ""
1.14176 ++ "brmi %0"
1.14177 ++ [(set_attr "type" "branch")
1.14178 ++ (set (attr "length")
1.14179 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
1.14180 ++ (le (minus (pc) (match_dup 0)) (const_int 256)))
1.14181 ++ (const_int 2)] ; use compact branch
1.14182 ++ (const_int 4))) ; use extended branch
1.14183 ++ (set_attr "cc" "none")])
1.14184 ++
1.14185 ++; branch if equal
1.14186 ++(define_insn "b<code>"
1.14187 ++ [(set (pc)
1.14188 ++ (if_then_else (any_cond:CC (cc0)
1.14189 ++ (const_int 0))
1.14190 ++ (label_ref (match_operand 0 "" ""))
1.14191 ++ (pc)))]
1.14192 ++ ""
1.14193 ++ "br<cond> %0 "
1.14194 ++ [(set_attr "type" "branch")
1.14195 ++ (set (attr "length")
1.14196 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
1.14197 ++ (le (minus (pc) (match_dup 0)) (const_int 256)))
1.14198 ++ (const_int 2)] ; use compact branch
1.14199 ++ (const_int 4))) ; use extended branch
1.14200 ++ (set_attr "cc" "none")])
1.14201 ++
1.14202 ++
1.14203 ++(define_insn "*b<code>-reverse"
1.14204 ++ [(set (pc)
1.14205 ++ (if_then_else (any_cond:CC (cc0)
1.14206 ++ (const_int 0))
1.14207 ++ (pc)
1.14208 ++ (label_ref (match_operand 0 "" ""))))]
1.14209 ++ ""
1.14210 ++ "br<invcond> %0 "
1.14211 ++ [(set_attr "type" "branch")
1.14212 ++ (set (attr "length")
1.14213 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
1.14214 ++ (le (minus (pc) (match_dup 0)) (const_int 256)))
1.14215 ++ (const_int 2)] ; use compact branch
1.14216 ++ (const_int 4))) ; use extended branch
1.14217 ++ (set_attr "cc" "none")])
1.14218 ++
1.14219 ++
1.14220 ++
1.14221 ++;=============================================================================
1.14222 ++; Conditional Add/Subtract
1.14223 ++;-----------------------------------------------------------------------------
1.14224 ++; sub{cond4} Rd, imm
1.14225 ++;=============================================================================
1.14226 ++
1.14227 ++
1.14228 ++(define_expand "add<mode>cc"
1.14229 ++ [(set (match_operand:ADDCC 0 "register_operand" "")
1.14230 ++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
1.14231 ++ [(match_dup 4)
1.14232 ++ (match_dup 5)])
1.14233 ++ (match_operand:ADDCC 2 "register_operand" "")
1.14234 ++ (plus:ADDCC
1.14235 ++ (match_dup 2)
1.14236 ++ (match_operand:ADDCC 3 "" ""))))]
1.14237 ++ ""
1.14238 ++ {
1.14239 ++ if ( !(GET_CODE (operands[3]) == CONST_INT
1.14240 ++ || (TARGET_V2_INSNS && REG_P(operands[3]))) ){
1.14241 ++ FAIL;
1.14242 ++ }
1.14243 ++
1.14244 ++ /* Delete compare instruction as it is merged into this instruction */
1.14245 ++ remove_insn (get_last_insn_anywhere ());
1.14246 ++
1.14247 ++ operands[4] = avr32_compare_op0;
1.14248 ++ operands[5] = avr32_compare_op1;
1.14249 ++
1.14250 ++ if ( TARGET_V2_INSNS
1.14251 ++ && REG_P(operands[3])
1.14252 ++ && REGNO(operands[0]) != REGNO(operands[2]) ){
1.14253 ++ emit_move_insn (operands[0], operands[2]);
1.14254 ++ operands[2] = operands[0];
1.14255 ++ }
1.14256 ++ }
1.14257 ++ )
1.14258 ++
1.14259 ++(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg"
1.14260 ++ [(set (match_operand:ADDCC 0 "register_operand" "=r")
1.14261 ++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
1.14262 ++ [(match_operand:CMP 4 "register_operand" "r")
1.14263 ++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
1.14264 ++ (match_dup 0)
1.14265 ++ (plus:ADDCC
1.14266 ++ (match_operand:ADDCC 2 "register_operand" "r")
1.14267 ++ (match_operand:ADDCC 3 "register_operand" "r"))))]
1.14268 ++ "TARGET_V2_INSNS"
1.14269 ++ {
1.14270 ++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
1.14271 ++ return "add%i1\t%0, %2, %3";
1.14272 ++ }
1.14273 ++ [(set_attr "length" "8")
1.14274 ++ (set_attr "cc" "cmp_cond_insn")])
1.14275 ++
1.14276 ++(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
1.14277 ++ [(set (match_operand:ADDCC 0 "register_operand" "=r")
1.14278 ++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
1.14279 ++ [(match_operand:CMP 4 "register_operand" "r")
1.14280 ++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
1.14281 ++ (match_operand:ADDCC 2 "register_operand" "0")
1.14282 ++ (plus:ADDCC
1.14283 ++ (match_dup 2)
1.14284 ++ (match_operand:ADDCC 3 "avr32_cond_immediate_operand" "Is08"))))]
1.14285 ++ ""
1.14286 ++ {
1.14287 ++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
1.14288 ++ return "sub%i1\t%0, -%3";
1.14289 ++ }
1.14290 ++ [(set_attr "length" "8")
1.14291 ++ (set_attr "cc" "cmp_cond_insn")])
1.14292 ++
1.14293 ++;=============================================================================
1.14294 ++; Conditional Move
1.14295 ++;-----------------------------------------------------------------------------
1.14296 ++; mov{cond4} Rd, (Rs/imm)
1.14297 ++;=============================================================================
1.14298 ++(define_expand "mov<mode>cc"
1.14299 ++ [(set (match_operand:MOVCC 0 "register_operand" "")
1.14300 ++ (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
1.14301 ++ [(match_dup 4)
1.14302 ++ (match_dup 5)])
1.14303 ++ (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "")
1.14304 ++ (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "")))]
1.14305 ++ ""
1.14306 ++ {
1.14307 ++ /* Delete compare instruction as it is merged into this instruction */
1.14308 ++ remove_insn (get_last_insn_anywhere ());
1.14309 ++
1.14310 ++ operands[4] = avr32_compare_op0;
1.14311 ++ operands[5] = avr32_compare_op1;
1.14312 ++ }
1.14313 ++ )
1.14314 ++
1.14315 ++
1.14316 ++(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
1.14317 ++ [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
1.14318 ++ (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
1.14319 ++ [(match_operand:CMP 4 "register_operand" "r,r,r")
1.14320 ++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>,<CMP:cmp_constraint>,<CMP:cmp_constraint>")])
1.14321 ++ (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "0, rKs08,rKs08")
1.14322 ++ (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "rKs08,0,rKs08")))]
1.14323 ++ ""
1.14324 ++ {
1.14325 ++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
1.14326 ++
1.14327 ++ switch( which_alternative ){
1.14328 ++ case 0:
1.14329 ++ return "mov%i1 %0, %3";
1.14330 ++ case 1:
1.14331 ++ return "mov%1 %0, %2";
1.14332 ++ case 2:
1.14333 ++ return "mov%1 %0, %2\;mov%i1 %0, %3";
1.14334 ++ default:
1.14335 ++ abort();
1.14336 ++ }
1.14337 ++
1.14338 ++ }
1.14339 ++ [(set_attr "length" "8,8,12")
1.14340 ++ (set_attr "cc" "cmp_cond_insn")])
1.14341 ++
1.14342 ++
1.14343 ++
1.14344 ++
1.14345 ++;;=============================================================================
1.14346 ++;; jump
1.14347 ++;;-----------------------------------------------------------------------------
1.14348 ++;; Jump inside a function; an unconditional branch to a label.
1.14349 ++;;=============================================================================
1.14350 ++(define_insn "jump"
1.14351 ++ [(set (pc)
1.14352 ++ (label_ref (match_operand 0 "" "")))]
1.14353 ++ ""
1.14354 ++ {
1.14355 ++ if (get_attr_length(insn) > 4)
1.14356 ++ return "Can't jump this far";
1.14357 ++ return (get_attr_length(insn) == 2 ?
1.14358 ++ "rjmp %0" : "bral %0");
1.14359 ++ }
1.14360 ++ [(set_attr "type" "branch")
1.14361 ++ (set (attr "length")
1.14362 ++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
1.14363 ++ (le (minus (pc) (match_dup 0)) (const_int 1024)))
1.14364 ++ (const_int 2) ; use rjmp
1.14365 ++ (le (match_dup 0) (const_int 1048575))
1.14366 ++ (const_int 4)] ; use bral
1.14367 ++ (const_int 8))) ; do something else
1.14368 ++ (set_attr "cc" "none")])
1.14369 ++
1.14370 ++;;=============================================================================
1.14371 ++;; call
1.14372 ++;;-----------------------------------------------------------------------------
1.14373 ++;; Subroutine call instruction returning no value.
1.14374 ++;;=============================================================================
1.14375 ++(define_insn "call_internal"
1.14376 ++ [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
1.14377 ++ (match_operand 1 "" ""))
1.14378 ++ (clobber (reg:SI LR_REGNUM))])]
1.14379 ++ ""
1.14380 ++ {
1.14381 ++ switch (which_alternative){
1.14382 ++ case 0:
1.14383 ++ return "icall\t%0";
1.14384 ++ case 1:
1.14385 ++ return "rcall\t%0";
1.14386 ++ case 2:
1.14387 ++ return "mcall\t%0";
1.14388 ++ case 3:
1.14389 ++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
1.14390 ++ return "call\t%0";
1.14391 ++ else
1.14392 ++ return "mcall\tr6[%0@got]";
1.14393 ++ default:
1.14394 ++ abort();
1.14395 ++ }
1.14396 ++ }
1.14397 ++ [(set_attr "type" "call")
1.14398 ++ (set_attr "length" "2,4,4,10")
1.14399 ++ (set_attr "cc" "clobber")])
1.14400 ++
1.14401 ++
1.14402 ++(define_expand "call"
1.14403 ++ [(parallel [(call (match_operand:SI 0 "" "")
1.14404 ++ (match_operand 1 "" ""))
1.14405 ++ (clobber (reg:SI LR_REGNUM))])]
1.14406 ++ ""
1.14407 ++ {
1.14408 ++ rtx call_address;
1.14409 ++ if ( GET_CODE(operands[0]) != MEM )
1.14410 ++ FAIL;
1.14411 ++
1.14412 ++ call_address = XEXP(operands[0], 0);
1.14413 ++
1.14414 ++ /* If assembler supports call pseudo insn and the call
1.14415 ++ address is a symbol then nothing special needs to be done. */
1.14416 ++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS
1.14417 ++ && (GET_CODE(call_address) == SYMBOL_REF) ){
1.14418 ++ /* We must however mark the function as using the GOT if
1.14419 ++ flag_pic is set, since the call insn might turn into
1.14420 ++ a mcall using the GOT ptr register. */
1.14421 ++ if ( flag_pic ){
1.14422 ++ current_function_uses_pic_offset_table = 1;
1.14423 ++ emit_call_insn(gen_call_internal(call_address, operands[1]));
1.14424 ++ DONE;
1.14425 ++ }
1.14426 ++ } else {
1.14427 ++ if ( flag_pic &&
1.14428 ++ GET_CODE(call_address) == SYMBOL_REF ){
1.14429 ++ current_function_uses_pic_offset_table = 1;
1.14430 ++ emit_call_insn(gen_call_internal(call_address, operands[1]));
1.14431 ++ DONE;
1.14432 ++ }
1.14433 ++
1.14434 ++ if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) ){
1.14435 ++ if ( optimize_size &&
1.14436 ++ GET_CODE(call_address) == SYMBOL_REF ){
1.14437 ++ call_address = force_const_mem(SImode, call_address);
1.14438 ++ } else {
1.14439 ++ call_address = force_reg(SImode, call_address);
1.14440 ++ }
1.14441 ++ }
1.14442 ++ }
1.14443 ++ emit_call_insn(gen_call_internal(call_address, operands[1]));
1.14444 ++ DONE;
1.14445 ++ }
1.14446 ++)
1.14447 ++
1.14448 ++;;=============================================================================
1.14449 ++;; call_value
1.14450 ++;;-----------------------------------------------------------------------------
1.14451 ++;; Subroutine call instruction returning a value.
1.14452 ++;;=============================================================================
1.14453 ++(define_expand "call_value"
1.14454 ++ [(parallel [(set (match_operand:SI 0 "" "")
1.14455 ++ (call (match_operand:SI 1 "" "")
1.14456 ++ (match_operand 2 "" "")))
1.14457 ++ (clobber (reg:SI LR_REGNUM))])]
1.14458 ++ ""
1.14459 ++ {
1.14460 ++ rtx call_address;
1.14461 ++ if ( GET_CODE(operands[1]) != MEM )
1.14462 ++ FAIL;
1.14463 ++
1.14464 ++ call_address = XEXP(operands[1], 0);
1.14465 ++
1.14466 ++ /* If assembler supports call pseudo insn and the call
1.14467 ++ address is a symbol then nothing special needs to be done. */
1.14468 ++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS
1.14469 ++ && (GET_CODE(call_address) == SYMBOL_REF) ){
1.14470 ++ /* We must however mark the function as using the GOT if
1.14471 ++ flag_pic is set, since the call insn might turn into
1.14472 ++ a mcall using the GOT ptr register. */
1.14473 ++ if ( flag_pic ) {
1.14474 ++ current_function_uses_pic_offset_table = 1;
1.14475 ++ emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
1.14476 ++ DONE;
1.14477 ++ }
1.14478 ++ } else {
1.14479 ++ if ( flag_pic &&
1.14480 ++ GET_CODE(call_address) == SYMBOL_REF ){
1.14481 ++ current_function_uses_pic_offset_table = 1;
1.14482 ++ emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
1.14483 ++ DONE;
1.14484 ++ }
1.14485 ++
1.14486 ++ if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
1.14487 ++ if ( optimize_size &&
1.14488 ++ GET_CODE(call_address) == SYMBOL_REF){
1.14489 ++ call_address = force_const_mem(SImode, call_address);
1.14490 ++ } else {
1.14491 ++ call_address = force_reg(SImode, call_address);
1.14492 ++ }
1.14493 ++ }
1.14494 ++ }
1.14495 ++ emit_call_insn(gen_call_value_internal(operands[0], call_address,
1.14496 ++ operands[2]));
1.14497 ++ DONE;
1.14498 ++
1.14499 ++ })
1.14500 ++
1.14501 ++(define_insn "call_value_internal"
1.14502 ++ [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
1.14503 ++ (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
1.14504 ++ (match_operand 2 "" "")))
1.14505 ++ (clobber (reg:SI LR_REGNUM))])]
1.14506 ++ ;; Operand 2 not used on the AVR32.
1.14507 ++ ""
1.14508 ++ {
1.14509 ++ switch (which_alternative){
1.14510 ++ case 0:
1.14511 ++ return "icall\t%1";
1.14512 ++ case 1:
1.14513 ++ return "rcall\t%1";
1.14514 ++ case 2:
1.14515 ++ return "mcall\t%1";
1.14516 ++ case 3:
1.14517 ++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
1.14518 ++ return "call\t%1";
1.14519 ++ else
1.14520 ++ return "mcall\tr6[%1@got]";
1.14521 ++ default:
1.14522 ++ abort();
1.14523 ++ }
1.14524 ++ }
1.14525 ++ [(set_attr "type" "call")
1.14526 ++ (set_attr "length" "2,4,4,10")
1.14527 ++ (set_attr "cc" "call_set")])
1.14528 ++
1.14529 ++
1.14530 ++;;=============================================================================
1.14531 ++;; untyped_call
1.14532 ++;;-----------------------------------------------------------------------------
1.14533 ++;; Subroutine call instruction returning a value of any type.
1.14534 ++;; The code is copied from m68k.md (except gen_blockage is removed)
1.14535 ++;; Fixme!
1.14536 ++;;=============================================================================
1.14537 ++(define_expand "untyped_call"
1.14538 ++ [(parallel [(call (match_operand 0 "avr32_call_operand" "")
1.14539 ++ (const_int 0))
1.14540 ++ (match_operand 1 "" "")
1.14541 ++ (match_operand 2 "" "")])]
1.14542 ++ ""
1.14543 ++ {
1.14544 ++ int i;
1.14545 ++
1.14546 ++ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
1.14547 ++
1.14548 ++ for (i = 0; i < XVECLEN (operands[2], 0); i++) {
1.14549 ++ rtx set = XVECEXP (operands[2], 0, i);
1.14550 ++ emit_move_insn (SET_DEST (set), SET_SRC (set));
1.14551 ++ }
1.14552 ++
1.14553 ++ /* The optimizer does not know that the call sets the function value
1.14554 ++ registers we stored in the result block. We avoid problems by
1.14555 ++ claiming that all hard registers are used and clobbered at this
1.14556 ++ point. */
1.14557 ++ emit_insn (gen_blockage ());
1.14558 ++
1.14559 ++ DONE;
1.14560 ++ })
1.14561 ++
1.14562 ++
1.14563 ++;;=============================================================================
1.14564 ++;; return
1.14565 ++;;=============================================================================
1.14566 ++
1.14567 ++(define_insn "return"
1.14568 ++ [(return)]
1.14569 ++ "USE_RETURN_INSN (FALSE)"
1.14570 ++ {
1.14571 ++ avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
1.14572 ++ return "";
1.14573 ++ }
1.14574 ++ [(set_attr "length" "4")
1.14575 ++ (set_attr "type" "call")]
1.14576 ++ )
1.14577 ++
1.14578 ++
1.14579 ++(define_insn "return_cond"
1.14580 ++ [(set (pc)
1.14581 ++ (if_then_else (match_operand 0 "avr32_comparison_operand" "")
1.14582 ++ (return)
1.14583 ++ (pc)))]
1.14584 ++ "USE_RETURN_INSN (TRUE)"
1.14585 ++ "ret%0\tr12";
1.14586 ++ [(set_attr "type" "call")])
1.14587 ++
1.14588 ++(define_insn "return_cond_predicable"
1.14589 ++ [(return)]
1.14590 ++ "USE_RETURN_INSN (TRUE)"
1.14591 ++ "ret%?\tr12";
1.14592 ++ [(set_attr "type" "call")
1.14593 ++ (set_attr "predicable" "yes")])
1.14594 ++
1.14595 ++
1.14596 ++(define_insn "return_imm"
1.14597 ++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
1.14598 ++ (use (reg RETVAL_REGNUM))
1.14599 ++ (return)])]
1.14600 ++ "USE_RETURN_INSN (FALSE) &&
1.14601 ++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
1.14602 ++ {
1.14603 ++ avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
1.14604 ++ return "";
1.14605 ++ }
1.14606 ++ [(set_attr "length" "4")
1.14607 ++ (set_attr "type" "call")]
1.14608 ++ )
1.14609 ++
1.14610 ++(define_insn "return_imm_cond"
1.14611 ++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
1.14612 ++ (use (reg RETVAL_REGNUM))
1.14613 ++ (set (pc)
1.14614 ++ (if_then_else (match_operand 1 "avr32_comparison_operand" "")
1.14615 ++ (return)
1.14616 ++ (pc)))])]
1.14617 ++ "USE_RETURN_INSN (TRUE) &&
1.14618 ++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
1.14619 ++ "ret%1\t%0";
1.14620 ++ [(set_attr "type" "call")]
1.14621 ++ )
1.14622 ++
1.14623 ++(define_insn "return_imm_predicable"
1.14624 ++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
1.14625 ++ (use (reg RETVAL_REGNUM))
1.14626 ++ (return)])]
1.14627 ++ "USE_RETURN_INSN (TRUE) &&
1.14628 ++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
1.14629 ++ "ret%?\t%0";
1.14630 ++ [(set_attr "type" "call")
1.14631 ++ (set_attr "predicable" "yes")])
1.14632 ++
1.14633 ++(define_insn "return_<mode>reg"
1.14634 ++ [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
1.14635 ++ (use (reg RETVAL_REGNUM))
1.14636 ++ (return)]
1.14637 ++ "USE_RETURN_INSN (TRUE)"
1.14638 ++ "ret%?\t%0";
1.14639 ++ [(set_attr "type" "call")
1.14640 ++ (set_attr "predicable" "yes")])
1.14641 ++
1.14642 ++(define_insn "return_<mode>reg_cond"
1.14643 ++ [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
1.14644 ++ (use (reg RETVAL_REGNUM))
1.14645 ++ (set (pc)
1.14646 ++ (if_then_else (match_operator 1 "avr32_comparison_operator"
1.14647 ++ [(cc0) (const_int 0)])
1.14648 ++ (return)
1.14649 ++ (pc)))]
1.14650 ++ "USE_RETURN_INSN (TRUE)"
1.14651 ++ "ret%1\t%0";
1.14652 ++ [(set_attr "type" "call")])
1.14653 ++
1.14654 ++;;=============================================================================
1.14655 ++;; nop
1.14656 ++;;-----------------------------------------------------------------------------
1.14657 ++;; No-op instruction.
1.14658 ++;;=============================================================================
1.14659 ++(define_insn "nop"
1.14660 ++ [(const_int 0)]
1.14661 ++ ""
1.14662 ++ "nop"
1.14663 ++ [(set_attr "length" "2")
1.14664 ++ (set_attr "type" "alu")
1.14665 ++ (set_attr "cc" "none")])
1.14666 ++
1.14667 ++;;=============================================================================
1.14668 ++;; nonlocal_goto_receiver
1.14669 ++;;-----------------------------------------------------------------------------
1.14670 ++;; For targets with a return stack we must make sure to flush the return stack
1.14671 ++;; since it will be corrupt after a nonlocal goto.
1.14672 ++;;=============================================================================
1.14673 ++(define_expand "nonlocal_goto_receiver"
1.14674 ++ [(const_int 0)]
1.14675 ++ "TARGET_RETURN_STACK"
1.14676 ++ "
1.14677 ++ {
1.14678 ++ emit_insn ( gen_frs() );
1.14679 ++ DONE;
1.14680 ++ }
1.14681 ++ "
1.14682 ++ )
1.14683 ++
1.14684 ++
1.14685 ++;;=============================================================================
1.14686 ++;; builtin_setjmp_receiver
1.14687 ++;;-----------------------------------------------------------------------------
1.14688 ++;; For pic code we need to reload the pic register.
1.14689 ++;; For targets with a return stack we must make sure to flush the return stack
1.14690 ++;; since it will probably be corrupted.
1.14691 ++;;=============================================================================
1.14692 ++(define_expand "builtin_setjmp_receiver"
1.14693 ++ [(label_ref (match_operand 0 "" ""))]
1.14694 ++ "flag_pic"
1.14695 ++ "
1.14696 ++ {
1.14697 ++ if ( TARGET_RETURN_STACK )
1.14698 ++ emit_insn ( gen_frs() );
1.14699 ++
1.14700 ++ avr32_load_pic_register ();
1.14701 ++ DONE;
1.14702 ++ }
1.14703 ++ "
1.14704 ++)
1.14705 ++
1.14706 ++
1.14707 ++;;=============================================================================
1.14708 ++;; indirect_jump
1.14709 ++;;-----------------------------------------------------------------------------
1.14710 ++;; Jump to an address in reg or memory.
1.14711 ++;;=============================================================================
1.14712 ++(define_expand "indirect_jump"
1.14713 ++ [(set (pc)
1.14714 ++ (match_operand:SI 0 "general_operand" ""))]
1.14715 ++ ""
1.14716 ++ {
1.14717 ++ /* One of the ops has to be in a register. */
1.14718 ++ if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
1.14719 ++ && !avr32_legitimate_pic_operand_p(operands[0]) )
1.14720 ++ operands[0] = legitimize_pic_address (operands[0], SImode, 0);
1.14721 ++ else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
1.14722 ++ /* If we have an address operand then this function uses the pic register. */
1.14723 ++ current_function_uses_pic_offset_table = 1;
1.14724 ++ })
1.14725 ++
1.14726 ++
1.14727 ++(define_insn "indirect_jump_internal"
1.14728 ++ [(set (pc)
1.14729 ++ (match_operand:SI 0 "avr32_non_rmw_general_operand" "r,m,W"))]
1.14730 ++ ""
1.14731 ++ {
1.14732 ++ switch( which_alternative ){
1.14733 ++ case 0:
1.14734 ++ return "mov\tpc, %0";
1.14735 ++ case 1:
1.14736 ++ if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
1.14737 ++ return "lddpc\tpc, %0";
1.14738 ++ else
1.14739 ++ return "ld.w\tpc, %0";
1.14740 ++ case 2:
1.14741 ++ if ( flag_pic )
1.14742 ++ return "ld.w\tpc, r6[%0@got]";
1.14743 ++ else
1.14744 ++ return "lda.w\tpc, %0";
1.14745 ++ default:
1.14746 ++ abort();
1.14747 ++ }
1.14748 ++ }
1.14749 ++ [(set_attr "length" "2,4,8")
1.14750 ++ (set_attr "type" "call,call,call")
1.14751 ++ (set_attr "cc" "none,none,clobber")])
1.14752 ++
1.14753 ++
1.14754 ++
1.14755 ++;;=============================================================================
1.14756 ++;; casesi and tablejump
1.14757 ++;;=============================================================================
1.14758 ++(define_insn "tablejump_add"
1.14759 ++ [(set (pc)
1.14760 ++ (plus:SI (match_operand:SI 0 "register_operand" "r")
1.14761 ++ (mult:SI (match_operand:SI 1 "register_operand" "r")
1.14762 ++ (match_operand:SI 2 "immediate_operand" "Ku04" ))))
1.14763 ++ (use (label_ref (match_operand 3 "" "")))]
1.14764 ++ "flag_pic &&
1.14765 ++ ((INTVAL(operands[2]) == 0) || (INTVAL(operands[2]) == 2) ||
1.14766 ++ (INTVAL(operands[2]) == 4) || (INTVAL(operands[2]) == 8))"
1.14767 ++ "add\tpc, %0, %1 << %p2"
1.14768 ++ [(set_attr "length" "4")
1.14769 ++ (set_attr "cc" "clobber")])
1.14770 ++
1.14771 ++(define_insn "tablejump_insn"
1.14772 ++ [(set (pc) (match_operand:SI 0 "memory_operand" "m"))
1.14773 ++ (use (label_ref (match_operand 1 "" "")))]
1.14774 ++ "!flag_pic"
1.14775 ++ "ld.w\tpc, %0"
1.14776 ++ [(set_attr "length" "4")
1.14777 ++ (set_attr "type" "call")
1.14778 ++ (set_attr "cc" "none")])
1.14779 ++
1.14780 ++(define_expand "casesi"
1.14781 ++ [(match_operand:SI 0 "register_operand" "") ; index to jump on
1.14782 ++ (match_operand:SI 1 "const_int_operand" "") ; lower bound
1.14783 ++ (match_operand:SI 2 "const_int_operand" "") ; total range
1.14784 ++ (match_operand:SI 3 "" "") ; table label
1.14785 ++ (match_operand:SI 4 "" "")] ; Out of range label
1.14786 ++ ""
1.14787 ++ "
1.14788 ++ {
1.14789 ++ rtx reg;
1.14790 ++ rtx index = operands[0];
1.14791 ++ rtx low_bound = operands[1];
1.14792 ++ rtx range = operands[2];
1.14793 ++ rtx table_label = operands[3];
1.14794 ++ rtx oor_label = operands[4];
1.14795 ++
1.14796 ++ index = force_reg ( SImode, index );
1.14797 ++ if (low_bound != const0_rtx)
1.14798 ++ {
1.14799 ++ if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){
1.14800 ++ reg = force_reg(SImode, GEN_INT (INTVAL (low_bound)));
1.14801 ++ emit_insn (gen_subsi3 (reg, index,
1.14802 ++ reg));
1.14803 ++ } else {
1.14804 ++ reg = gen_reg_rtx (SImode);
1.14805 ++ emit_insn (gen_addsi3 (reg, index,
1.14806 ++ GEN_INT (-INTVAL (low_bound))));
1.14807 ++ }
1.14808 ++ index = reg;
1.14809 ++ }
1.14810 ++
1.14811 ++ if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\"))
1.14812 ++ range = force_reg (SImode, range);
1.14813 ++
1.14814 ++ emit_cmp_and_jump_insns ( index, range, GTU, NULL_RTX, SImode, 1, oor_label );
1.14815 ++ reg = gen_reg_rtx (SImode);
1.14816 ++ emit_move_insn ( reg, gen_rtx_LABEL_REF (VOIDmode, table_label));
1.14817 ++
1.14818 ++ if ( flag_pic )
1.14819 ++ emit_jump_insn ( gen_tablejump_add ( reg, index, GEN_INT(4), table_label));
1.14820 ++ else
1.14821 ++ emit_jump_insn (
1.14822 ++ gen_tablejump_insn ( gen_rtx_MEM ( SImode,
1.14823 ++ gen_rtx_PLUS ( SImode,
1.14824 ++ reg,
1.14825 ++ gen_rtx_MULT ( SImode,
1.14826 ++ index,
1.14827 ++ GEN_INT(4)))),
1.14828 ++ table_label));
1.14829 ++ DONE;
1.14830 ++ }"
1.14831 ++)
1.14832 ++
1.14833 ++
1.14834 ++
1.14835 ++(define_insn "prefetch"
1.14836 ++ [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p")
1.14837 ++ (match_operand 1 "const_int_operand" "")
1.14838 ++ (match_operand 2 "const_int_operand" ""))]
1.14839 ++ ""
1.14840 ++ {
1.14841 ++ return "pref\t%0";
1.14842 ++ }
1.14843 ++
1.14844 ++ [(set_attr "length" "4")
1.14845 ++ (set_attr "type" "load")
1.14846 ++ (set_attr "cc" "none")])
1.14847 ++
1.14848 ++
1.14849 ++
1.14850 ++;;=============================================================================
1.14851 ++;; prologue
1.14852 ++;;-----------------------------------------------------------------------------
1.14853 ++;; This pattern, if defined, emits RTL for entry to a function. The function
1.14854 ++;; entry is responsible for setting up the stack frame, initializing the frame
1.14855 ++;; pointer register, saving callee saved registers, etc.
1.14856 ++;;=============================================================================
1.14857 ++(define_expand "prologue"
1.14858 ++ [(clobber (const_int 0))]
1.14859 ++ ""
1.14860 ++ "
1.14861 ++ avr32_expand_prologue();
1.14862 ++ DONE;
1.14863 ++ "
1.14864 ++ )
1.14865 ++
1.14866 ++;;=============================================================================
1.14867 ++;; eh_return
1.14868 ++;;-----------------------------------------------------------------------------
1.14869 ++;; This pattern, if defined, affects the way __builtin_eh_return, and
1.14870 ++;; thence the call frame exception handling library routines, are
1.14871 ++;; built. It is intended to handle non-trivial actions needed along
1.14872 ++;; the abnormal return path.
1.14873 ++;;
1.14874 ++;; The address of the exception handler to which the function should
1.14875 ++;; return is passed as operand to this pattern. It will normally need
1.14876 ++;; to be copied by the pattern to some special register or memory
1.14877 ++;; location. If the pattern needs to determine the location of the
1.14878 ++;; target call frame in order to do so, it may use
1.14879 ++;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
1.14880 ++;; assigned.
1.14881 ++;;
1.14882 ++;; If this pattern is not defined, the default action will be to
1.14883 ++;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
1.14884 ++;; that macro or this pattern needs to be defined if call frame
1.14885 ++;; exception handling is to be used.
1.14886 ++
1.14887 ++;; We can't expand this before we know where the link register is stored.
1.14888 ++(define_insn_and_split "eh_return"
1.14889 ++ [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
1.14890 ++ VUNSPEC_EH_RETURN)
1.14891 ++ (clobber (match_scratch:SI 1 "=&r"))]
1.14892 ++ ""
1.14893 ++ "#"
1.14894 ++ "reload_completed"
1.14895 ++ [(const_int 0)]
1.14896 ++ "
1.14897 ++ {
1.14898 ++ avr32_set_return_address (operands[0], operands[1]);
1.14899 ++ DONE;
1.14900 ++ }"
1.14901 ++ )
1.14902 ++
1.14903 ++
1.14904 ++;;=============================================================================
1.14905 ++;; ffssi2
1.14906 ++;;-----------------------------------------------------------------------------
1.14907 ++(define_insn "ffssi2"
1.14908 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.14909 ++ (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
1.14910 ++ ""
1.14911 ++ "mov %0, %1
1.14912 ++ brev %0
1.14913 ++ clz %0, %0
1.14914 ++ sub %0, -1
1.14915 ++ cp %0, 33
1.14916 ++ moveq %0, 0"
1.14917 ++ [(set_attr "length" "18")
1.14918 ++ (set_attr "cc" "clobber")]
1.14919 ++ )
1.14920 ++
1.14921 ++
1.14922 ++
1.14923 ++;;=============================================================================
1.14924 ++;; swap_h
1.14925 ++;;-----------------------------------------------------------------------------
1.14926 ++(define_insn "*swap_h"
1.14927 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.14928 ++ (ior:SI (ashift:SI (match_dup 0) (const_int 16))
1.14929 ++ (lshiftrt:SI (match_dup 0) (const_int 16))))]
1.14930 ++ ""
1.14931 ++ "swap.h %0"
1.14932 ++ [(set_attr "length" "2")]
1.14933 ++ )
1.14934 ++
1.14935 ++(define_insn_and_split "bswap_16"
1.14936 ++ [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
1.14937 ++ (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
1.14938 ++ (const_int 8))
1.14939 ++ (const_int 255))
1.14940 ++ (ashift:HI (and:HI (match_dup 1)
1.14941 ++ (const_int 255))
1.14942 ++ (const_int 8))))]
1.14943 ++ ""
1.14944 ++ {
1.14945 ++ switch ( which_alternative ){
1.14946 ++ case 0:
1.14947 ++ if ( REGNO(operands[0]) == REGNO(operands[1]))
1.14948 ++ return "swap.bh\t%0";
1.14949 ++ else
1.14950 ++ return "mov\t%0, %1\;swap.bh\t%0";
1.14951 ++ case 1:
1.14952 ++ return "stswp.h\t%0, %1";
1.14953 ++ case 2:
1.14954 ++ return "ldswp.sh\t%0, %1";
1.14955 ++ default:
1.14956 ++ abort();
1.14957 ++ }
1.14958 ++ }
1.14959 ++
1.14960 ++ "(reload_completed &&
1.14961 ++ REG_P(operands[0]) && REG_P(operands[1])
1.14962 ++ && (REGNO(operands[0]) != REGNO(operands[1])))"
1.14963 ++ [(set (match_dup 0) (match_dup 1))
1.14964 ++ (set (match_dup 0)
1.14965 ++ (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
1.14966 ++ (const_int 8))
1.14967 ++ (const_int 255))
1.14968 ++ (ashift:HI (and:HI (match_dup 0)
1.14969 ++ (const_int 255))
1.14970 ++ (const_int 8))))]
1.14971 ++ ""
1.14972 ++
1.14973 ++ [(set_attr "length" "4,4,4")
1.14974 ++ (set_attr "type" "alu,store,load_rm")]
1.14975 ++ )
1.14976 ++
1.14977 ++(define_insn_and_split "bswap_32"
1.14978 ++ [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
1.14979 ++ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
1.14980 ++ (const_int -16777216))
1.14981 ++ (const_int 24))
1.14982 ++ (lshiftrt:SI (and:SI (match_dup 1)
1.14983 ++ (const_int 16711680))
1.14984 ++ (const_int 8)))
1.14985 ++ (ior:SI (ashift:SI (and:SI (match_dup 1)
1.14986 ++ (const_int 65280))
1.14987 ++ (const_int 8))
1.14988 ++ (ashift:SI (and:SI (match_dup 1)
1.14989 ++ (const_int 255))
1.14990 ++ (const_int 24)))))]
1.14991 ++ ""
1.14992 ++ {
1.14993 ++ switch ( which_alternative ){
1.14994 ++ case 0:
1.14995 ++ if ( REGNO(operands[0]) == REGNO(operands[1]))
1.14996 ++ return "swap.b\t%0";
1.14997 ++ else
1.14998 ++ return "#";
1.14999 ++ case 1:
1.15000 ++ return "stswp.w\t%0, %1";
1.15001 ++ case 2:
1.15002 ++ return "ldswp.w\t%0, %1";
1.15003 ++ default:
1.15004 ++ abort();
1.15005 ++ }
1.15006 ++ }
1.15007 ++ "(reload_completed &&
1.15008 ++ REG_P(operands[0]) && REG_P(operands[1])
1.15009 ++ && (REGNO(operands[0]) != REGNO(operands[1])))"
1.15010 ++ [(set (match_dup 0) (match_dup 1))
1.15011 ++ (set (match_dup 0)
1.15012 ++ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
1.15013 ++ (const_int -16777216))
1.15014 ++ (const_int 24))
1.15015 ++ (lshiftrt:SI (and:SI (match_dup 0)
1.15016 ++ (const_int 16711680))
1.15017 ++ (const_int 8)))
1.15018 ++ (ior:SI (ashift:SI (and:SI (match_dup 0)
1.15019 ++ (const_int 65280))
1.15020 ++ (const_int 8))
1.15021 ++ (ashift:SI (and:SI (match_dup 0)
1.15022 ++ (const_int 255))
1.15023 ++ (const_int 24)))))]
1.15024 ++ ""
1.15025 ++
1.15026 ++ [(set_attr "length" "4,4,4")
1.15027 ++ (set_attr "type" "alu,store,load_rm")]
1.15028 ++ )
1.15029 ++
1.15030 ++
1.15031 ++;;=============================================================================
1.15032 ++;; blockage
1.15033 ++;;-----------------------------------------------------------------------------
1.15034 ++;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
1.15035 ++;; all of memory. This blocks insns from being moved across this point.
1.15036 ++
1.15037 ++(define_insn "blockage"
1.15038 ++ [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
1.15039 ++ ""
1.15040 ++ ""
1.15041 ++ [(set_attr "length" "0")]
1.15042 ++)
1.15043 ++
1.15044 ++;;=============================================================================
1.15045 ++;; clzsi2
1.15046 ++;;-----------------------------------------------------------------------------
1.15047 ++(define_insn "clzsi2"
1.15048 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.15049 ++ (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
1.15050 ++ ""
1.15051 ++ "clz %0, %1"
1.15052 ++ [(set_attr "length" "4")
1.15053 ++ (set_attr "cc" "set_z")]
1.15054 ++ )
1.15055 ++
1.15056 ++;;=============================================================================
1.15057 ++;; ctzsi2
1.15058 ++;;-----------------------------------------------------------------------------
1.15059 ++(define_insn "ctzsi2"
1.15060 ++ [ (set (match_operand:SI 0 "register_operand" "=r,r")
1.15061 ++ (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
1.15062 ++ ""
1.15063 ++ "@
1.15064 ++ brev\t%0\;clz\t%0, %0
1.15065 ++ mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
1.15066 ++ [(set_attr "length" "8")
1.15067 ++ (set_attr "cc" "set_z")]
1.15068 ++ )
1.15069 ++
1.15070 ++;;=============================================================================
1.15071 ++;; cache instructions
1.15072 ++;;-----------------------------------------------------------------------------
1.15073 ++(define_insn "cache"
1.15074 ++ [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p")
1.15075 ++ (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
1.15076 ++ ""
1.15077 ++ "cache %0, %1"
1.15078 ++ [(set_attr "length" "4")]
1.15079 ++ )
1.15080 ++
1.15081 ++(define_insn "sync"
1.15082 ++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
1.15083 ++ ""
1.15084 ++ "sync %0"
1.15085 ++ [(set_attr "length" "4")]
1.15086 ++ )
1.15087 ++
1.15088 ++;;=============================================================================
1.15089 ++;; TLB instructions
1.15090 ++;;-----------------------------------------------------------------------------
1.15091 ++(define_insn "tlbr"
1.15092 ++ [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
1.15093 ++ ""
1.15094 ++ "tlbr"
1.15095 ++ [(set_attr "length" "2")]
1.15096 ++ )
1.15097 ++
1.15098 ++(define_insn "tlbw"
1.15099 ++ [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
1.15100 ++ ""
1.15101 ++ "tlbw"
1.15102 ++ [(set_attr "length" "2")]
1.15103 ++ )
1.15104 ++
1.15105 ++(define_insn "tlbs"
1.15106 ++ [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
1.15107 ++ ""
1.15108 ++ "tlbs"
1.15109 ++ [(set_attr "length" "2")]
1.15110 ++ )
1.15111 ++
1.15112 ++;;=============================================================================
1.15113 ++;; Breakpoint instruction
1.15114 ++;;-----------------------------------------------------------------------------
1.15115 ++(define_insn "breakpoint"
1.15116 ++ [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
1.15117 ++ ""
1.15118 ++ "breakpoint"
1.15119 ++ [(set_attr "length" "2")]
1.15120 ++ )
1.15121 ++
1.15122 ++
1.15123 ++;;=============================================================================
1.15124 ++;; mtsr/mfsr instruction
1.15125 ++;;-----------------------------------------------------------------------------
1.15126 ++(define_insn "mtsr"
1.15127 ++ [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
1.15128 ++ (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
1.15129 ++ ""
1.15130 ++ "mtsr\t%0, %1"
1.15131 ++ [(set_attr "length" "4")]
1.15132 ++ )
1.15133 ++
1.15134 ++(define_insn "mfsr"
1.15135 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.15136 ++ (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
1.15137 ++ ""
1.15138 ++ "mfsr\t%0, %1"
1.15139 ++ [(set_attr "length" "4")]
1.15140 ++ )
1.15141 ++
1.15142 ++;;=============================================================================
1.15143 ++;; mtdr/mfdr instruction
1.15144 ++;;-----------------------------------------------------------------------------
1.15145 ++(define_insn "mtdr"
1.15146 ++ [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
1.15147 ++ (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
1.15148 ++ ""
1.15149 ++ "mtdr\t%0, %1"
1.15150 ++ [(set_attr "length" "4")]
1.15151 ++ )
1.15152 ++
1.15153 ++(define_insn "mfdr"
1.15154 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.15155 ++ (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
1.15156 ++ ""
1.15157 ++ "mfdr\t%0, %1"
1.15158 ++ [(set_attr "length" "4")]
1.15159 ++ )
1.15160 ++
1.15161 ++;;=============================================================================
1.15162 ++;; musfr
1.15163 ++;;-----------------------------------------------------------------------------
1.15164 ++(define_insn "musfr"
1.15165 ++ [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
1.15166 ++ ""
1.15167 ++ "musfr\t%0"
1.15168 ++ [(set_attr "length" "2")
1.15169 ++ (set_attr "cc" "clobber")]
1.15170 ++ )
1.15171 ++
1.15172 ++(define_insn "mustr"
1.15173 ++ [ (set (match_operand:SI 0 "register_operand" "=r")
1.15174 ++ (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
1.15175 ++ ""
1.15176 ++ "mustr\t%0"
1.15177 ++ [(set_attr "length" "2")]
1.15178 ++ )
1.15179 ++
1.15180 ++(define_insn "ssrf"
1.15181 ++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)]
1.15182 ++ ""
1.15183 ++ "ssrf %0"
1.15184 ++ [(set_attr "length" "2")
1.15185 ++ (set_attr "cc" "clobber")]
1.15186 ++ )
1.15187 ++
1.15188 ++(define_insn "csrf"
1.15189 ++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)]
1.15190 ++ ""
1.15191 ++ "csrf %0"
1.15192 ++ [(set_attr "length" "2")
1.15193 ++ (set_attr "cc" "clobber")]
1.15194 ++ )
1.15195 ++
1.15196 ++;;=============================================================================
1.15197 ++;; Flush Return Stack instruction
1.15198 ++;;-----------------------------------------------------------------------------
1.15199 ++(define_insn "frs"
1.15200 ++ [ (unspec_volatile [(const_int 0)] VUNSPEC_FRS)]
1.15201 ++ ""
1.15202 ++ "frs"
1.15203 ++ [(set_attr "length" "2")
1.15204 ++ (set_attr "cc" "none")]
1.15205 ++ )
1.15206 ++
1.15207 ++
1.15208 ++;;=============================================================================
1.15209 ++;; Saturation Round Scale instruction
1.15210 ++;;-----------------------------------------------------------------------------
1.15211 ++(define_insn "sats"
1.15212 ++ [ (set (match_operand:SI 0 "register_operand" "+r")
1.15213 ++ (unspec:SI [(match_dup 0)
1.15214 ++ (match_operand 1 "immediate_operand" "Ku05")
1.15215 ++ (match_operand 2 "immediate_operand" "Ku05")]
1.15216 ++ UNSPEC_SATS)) ]
1.15217 ++ "TARGET_DSP"
1.15218 ++ "sats\t%0 >> %1, %2"
1.15219 ++ [(set_attr "type" "alu_sat")
1.15220 ++ (set_attr "length" "4")]
1.15221 ++ )
1.15222 ++
1.15223 ++(define_insn "satu"
1.15224 ++ [ (set (match_operand:SI 0 "register_operand" "+r")
1.15225 ++ (unspec:SI [(match_dup 0)
1.15226 ++ (match_operand 1 "immediate_operand" "Ku05")
1.15227 ++ (match_operand 2 "immediate_operand" "Ku05")]
1.15228 ++ UNSPEC_SATU)) ]
1.15229 ++ "TARGET_DSP"
1.15230 ++ "satu\t%0 >> %1, %2"
1.15231 ++ [(set_attr "type" "alu_sat")
1.15232 ++ (set_attr "length" "4")]
1.15233 ++ )
1.15234 ++
1.15235 ++(define_insn "satrnds"
1.15236 ++ [ (set (match_operand:SI 0 "register_operand" "+r")
1.15237 ++ (unspec:SI [(match_dup 0)
1.15238 ++ (match_operand 1 "immediate_operand" "Ku05")
1.15239 ++ (match_operand 2 "immediate_operand" "Ku05")]
1.15240 ++ UNSPEC_SATRNDS)) ]
1.15241 ++ "TARGET_DSP"
1.15242 ++ "satrnds\t%0 >> %1, %2"
1.15243 ++ [(set_attr "type" "alu_sat")
1.15244 ++ (set_attr "length" "4")]
1.15245 ++ )
1.15246 ++
1.15247 ++(define_insn "satrndu"
1.15248 ++ [ (set (match_operand:SI 0 "register_operand" "+r")
1.15249 ++ (unspec:SI [(match_dup 0)
1.15250 ++ (match_operand 1 "immediate_operand" "Ku05")
1.15251 ++ (match_operand 2 "immediate_operand" "Ku05")]
1.15252 ++ UNSPEC_SATRNDU)) ]
1.15253 ++ "TARGET_DSP"
1.15254 ++ "satrndu\t%0 >> %1, %2"
1.15255 ++ [(set_attr "type" "alu_sat")
1.15256 ++ (set_attr "length" "4")]
1.15257 ++ )
1.15258 ++
1.15259 ++;; Special patterns for dealing with the constant pool
1.15260 ++
1.15261 ++(define_insn "align_4"
1.15262 ++ [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
1.15263 ++ ""
1.15264 ++ {
1.15265 ++ assemble_align (32);
1.15266 ++ return "";
1.15267 ++ }
1.15268 ++ [(set_attr "length" "2")]
1.15269 ++)
1.15270 ++
1.15271 ++(define_insn "consttable_start"
1.15272 ++ [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
1.15273 ++ ""
1.15274 ++ {
1.15275 ++ return ".cpool";
1.15276 ++ }
1.15277 ++ [(set_attr "length" "0")]
1.15278 ++ )
1.15279 ++
1.15280 ++(define_insn "consttable_end"
1.15281 ++ [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
1.15282 ++ ""
1.15283 ++ {
1.15284 ++ making_const_table = FALSE;
1.15285 ++ return "";
1.15286 ++ }
1.15287 ++ [(set_attr "length" "0")]
1.15288 ++)
1.15289 ++
1.15290 ++
1.15291 ++(define_insn "consttable_4"
1.15292 ++ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
1.15293 ++ ""
1.15294 ++ {
1.15295 ++ making_const_table = TRUE;
1.15296 ++ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
1.15297 ++ {
1.15298 ++ case MODE_FLOAT:
1.15299 ++ {
1.15300 ++ REAL_VALUE_TYPE r;
1.15301 ++ char real_string[1024];
1.15302 ++ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
1.15303 ++ real_to_decimal(real_string, &r, 1024, 0, 1);
1.15304 ++ asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
1.15305 ++ break;
1.15306 ++ }
1.15307 ++ default:
1.15308 ++ assemble_integer (operands[0], 4, 0, 1);
1.15309 ++ break;
1.15310 ++ }
1.15311 ++ return "";
1.15312 ++ }
1.15313 ++ [(set_attr "length" "4")]
1.15314 ++)
1.15315 ++
1.15316 ++(define_insn "consttable_8"
1.15317 ++ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
1.15318 ++ ""
1.15319 ++ {
1.15320 ++ making_const_table = TRUE;
1.15321 ++ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
1.15322 ++ {
1.15323 ++ case MODE_FLOAT:
1.15324 ++ {
1.15325 ++ REAL_VALUE_TYPE r;
1.15326 ++ char real_string[1024];
1.15327 ++ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
1.15328 ++ real_to_decimal(real_string, &r, 1024, 0, 1);
1.15329 ++ asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
1.15330 ++ break;
1.15331 ++ }
1.15332 ++ default:
1.15333 ++ assemble_integer(operands[0], 8, 0, 1);
1.15334 ++ break;
1.15335 ++ }
1.15336 ++ return "";
1.15337 ++ }
1.15338 ++ [(set_attr "length" "8")]
1.15339 ++)
1.15340 ++
1.15341 ++(define_insn "consttable_16"
1.15342 ++ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)]
1.15343 ++ ""
1.15344 ++ {
1.15345 ++ making_const_table = TRUE;
1.15346 ++ assemble_integer(operands[0], 16, 0, 1);
1.15347 ++ return "";
1.15348 ++ }
1.15349 ++ [(set_attr "length" "16")]
1.15350 ++)
1.15351 ++
1.15352 ++;;=============================================================================
1.15353 ++;; coprocessor instructions
1.15354 ++;;-----------------------------------------------------------------------------
1.15355 ++(define_insn "cop"
1.15356 ++ [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
1.15357 ++ (match_operand 1 "immediate_operand" "Ku04")
1.15358 ++ (match_operand 2 "immediate_operand" "Ku04")
1.15359 ++ (match_operand 3 "immediate_operand" "Ku04")
1.15360 ++ (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
1.15361 ++ ""
1.15362 ++ "cop\tcp%0, cr%1, cr%2, cr%3, %4"
1.15363 ++ [(set_attr "length" "4")]
1.15364 ++ )
1.15365 ++
1.15366 ++(define_insn "mvcrsi"
1.15367 ++ [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
1.15368 ++ (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
1.15369 ++ (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
1.15370 ++ VUNSPEC_MVCR)) ]
1.15371 ++ ""
1.15372 ++ "@
1.15373 ++ mvcr.w\tcp%1, %0, cr%2
1.15374 ++ stcm.w\tcp%1, %0, cr%2
1.15375 ++ stc.w\tcp%1, %0, cr%2"
1.15376 ++ [(set_attr "length" "4")]
1.15377 ++ )
1.15378 ++
1.15379 ++(define_insn "mvcrdi"
1.15380 ++ [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
1.15381 ++ (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
1.15382 ++ (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
1.15383 ++ VUNSPEC_MVCR)) ]
1.15384 ++ ""
1.15385 ++ "@
1.15386 ++ mvcr.d\tcp%1, %0, cr%2
1.15387 ++ stcm.d\tcp%1, %0, cr%2-cr%i2
1.15388 ++ stc.d\tcp%1, %0, cr%2"
1.15389 ++ [(set_attr "length" "4")]
1.15390 ++ )
1.15391 ++
1.15392 ++(define_insn "mvrcsi"
1.15393 ++ [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
1.15394 ++ (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
1.15395 ++ (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
1.15396 ++ VUNSPEC_MVRC)]
1.15397 ++ ""
1.15398 ++ {
1.15399 ++ switch (which_alternative){
1.15400 ++ case 0:
1.15401 ++ return "mvrc.w\tcp%0, cr%1, %2";
1.15402 ++ case 1:
1.15403 ++ return "ldcm.w\tcp%0, %2, cr%1";
1.15404 ++ case 2:
1.15405 ++ return "ldc.w\tcp%0, cr%1, %2";
1.15406 ++ default:
1.15407 ++ abort();
1.15408 ++ }
1.15409 ++ }
1.15410 ++ [(set_attr "length" "4")]
1.15411 ++ )
1.15412 ++
1.15413 ++(define_insn "mvrcdi"
1.15414 ++ [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
1.15415 ++ (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
1.15416 ++ (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
1.15417 ++ VUNSPEC_MVRC)]
1.15418 ++ ""
1.15419 ++ {
1.15420 ++ switch (which_alternative){
1.15421 ++ case 0:
1.15422 ++ return "mvrc.d\tcp%0, cr%1, %2";
1.15423 ++ case 1:
1.15424 ++ return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
1.15425 ++ case 2:
1.15426 ++ return "ldc.d\tcp%0, cr%1, %2";
1.15427 ++ default:
1.15428 ++ abort();
1.15429 ++ }
1.15430 ++ }
1.15431 ++ [(set_attr "length" "4")]
1.15432 ++ )
1.15433 ++
1.15434 ++;;=============================================================================
1.15435 ++;; epilogue
1.15436 ++;;-----------------------------------------------------------------------------
1.15437 ++;; This pattern emits RTL for exit from a function. The function exit is
1.15438 ++;; responsible for deallocating the stack frame, restoring callee saved
1.15439 ++;; registers and emitting the return instruction.
1.15440 ++;; ToDo: use TARGET_ASM_FUNCTION_PROLOGUE instead.
1.15441 ++;;=============================================================================
1.15442 ++(define_expand "epilogue"
1.15443 ++ [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
1.15444 ++ ""
1.15445 ++ "
1.15446 ++ if (USE_RETURN_INSN (FALSE)){
1.15447 ++ emit_jump_insn (gen_return ());
1.15448 ++ DONE;
1.15449 ++ }
1.15450 ++ emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
1.15451 ++ gen_rtvec (1,
1.15452 ++ gen_rtx_RETURN (VOIDmode)),
1.15453 ++ VUNSPEC_EPILOGUE));
1.15454 ++ DONE;
1.15455 ++ "
1.15456 ++ )
1.15457 ++
1.15458 ++(define_insn "*epilogue_insns"
1.15459 ++ [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
1.15460 ++ ""
1.15461 ++ {
1.15462 ++ avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
1.15463 ++ return "";
1.15464 ++ }
1.15465 ++ ; Length is absolute worst case
1.15466 ++ [(set_attr "type" "branch")
1.15467 ++ (set_attr "length" "12")]
1.15468 ++ )
1.15469 ++
1.15470 ++(define_insn "*epilogue_insns_ret_imm"
1.15471 ++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
1.15472 ++ (use (reg RETVAL_REGNUM))
1.15473 ++ (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
1.15474 ++ "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
1.15475 ++ {
1.15476 ++ avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
1.15477 ++ return "";
1.15478 ++ }
1.15479 ++ ; Length is absolute worst case
1.15480 ++ [(set_attr "type" "branch")
1.15481 ++ (set_attr "length" "12")]
1.15482 ++ )
1.15483 ++
1.15484 ++(define_insn "sibcall_epilogue"
1.15485 ++ [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
1.15486 ++ ""
1.15487 ++ {
1.15488 ++ avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
1.15489 ++ return "";
1.15490 ++ }
1.15491 ++;; Length is absolute worst case
1.15492 ++ [(set_attr "type" "branch")
1.15493 ++ (set_attr "length" "12")]
1.15494 ++ )
1.15495 ++
1.15496 ++(define_insn "*sibcall_epilogue_insns_ret_imm"
1.15497 ++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
1.15498 ++ (use (reg RETVAL_REGNUM))
1.15499 ++ (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
1.15500 ++ "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
1.15501 ++ {
1.15502 ++ avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
1.15503 ++ return "";
1.15504 ++ }
1.15505 ++ ; Length is absolute worst case
1.15506 ++ [(set_attr "type" "branch")
1.15507 ++ (set_attr "length" "12")]
1.15508 ++ )
1.15509 ++
1.15510 ++(define_insn "ldxi"
1.15511 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.15512 ++ (mem:SI (plus:SI
1.15513 ++ (match_operand:SI 1 "register_operand" "r")
1.15514 ++ (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
1.15515 ++ (const_int 8)
1.15516 ++ (match_operand:SI 3 "immediate_operand" "Ku05"))
1.15517 ++ (const_int 4)))))]
1.15518 ++ "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
1.15519 ++ || INTVAL(operands[3]) == 0)"
1.15520 ++ {
1.15521 ++ switch ( INTVAL(operands[3]) ){
1.15522 ++ case 0:
1.15523 ++ return "ld.w %0, %1[%2:b << 2]";
1.15524 ++ case 8:
1.15525 ++ return "ld.w %0, %1[%2:l << 2]";
1.15526 ++ case 16:
1.15527 ++ return "ld.w %0, %1[%2:u << 2]";
1.15528 ++ case 24:
1.15529 ++ return "ld.w %0, %1[%2:t << 2]";
1.15530 ++ default:
1.15531 ++ internal_error("illegal operand for ldxi");
1.15532 ++ }
1.15533 ++ }
1.15534 ++ [(set_attr "type" "load")
1.15535 ++ (set_attr "length" "4")
1.15536 ++ (set_attr "cc" "none")])
1.15537 ++
1.15538 ++
1.15539 ++
1.15540 ++
1.15541 ++
1.15542 ++
1.15543 ++;;=============================================================================
1.15544 ++;; Peephole optimizing
1.15545 ++;;-----------------------------------------------------------------------------
1.15546 ++;; Changing
1.15547 ++;; sub r8, r7, 8
1.15548 ++;; st.w r8[0x0], r12
1.15549 ++;; to
1.15550 ++;; sub r8, r7, 8
1.15551 ++;; st.w r7[-0x8], r12
1.15552 ++;;=============================================================================
1.15553 ++; (set (reg:SI 9 r8)
1.15554 ++; (plus:SI (reg/f:SI 6 r7)
1.15555 ++; (const_int ...)))
1.15556 ++; (set (mem:SI (reg:SI 9 r8))
1.15557 ++; (reg:SI 12 r12))
1.15558 ++(define_peephole2
1.15559 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15560 ++ (plus:SI (match_operand:SI 1 "register_operand" "")
1.15561 ++ (match_operand:SI 2 "immediate_operand" "")))
1.15562 ++ (set (mem:SI (match_dup 0))
1.15563 ++ (match_operand:SI 3 "register_operand" ""))]
1.15564 ++ "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
1.15565 ++ [(set (match_dup 0)
1.15566 ++ (plus:SI (match_dup 1)
1.15567 ++ (match_dup 2)))
1.15568 ++ (set (mem:SI (plus:SI (match_dup 1)
1.15569 ++ (match_dup 2)))
1.15570 ++ (match_dup 3))]
1.15571 ++ "")
1.15572 ++
1.15573 ++;;=============================================================================
1.15574 ++;; Peephole optimizing
1.15575 ++;;-----------------------------------------------------------------------------
1.15576 ++;; Changing
1.15577 ++;; sub r6, r7, 4
1.15578 ++;; ld.w r6, r6[0x0]
1.15579 ++;; to
1.15580 ++;; sub r6, r7, 4
1.15581 ++;; ld.w r6, r7[-0x4]
1.15582 ++;;=============================================================================
1.15583 ++; (set (reg:SI 7 r6)
1.15584 ++; (plus:SI (reg/f:SI 6 r7)
1.15585 ++; (const_int -4 [0xfffffffc])))
1.15586 ++; (set (reg:SI 7 r6)
1.15587 ++; (mem:SI (reg:SI 7 r6)))
1.15588 ++(define_peephole2
1.15589 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15590 ++ (plus:SI (match_operand:SI 1 "register_operand" "")
1.15591 ++ (match_operand:SI 2 "immediate_operand" "")))
1.15592 ++ (set (match_operand:SI 3 "register_operand" "")
1.15593 ++ (mem:SI (match_dup 0)))]
1.15594 ++ "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
1.15595 ++ [(set (match_dup 0)
1.15596 ++ (plus:SI (match_dup 1)
1.15597 ++ (match_dup 2)))
1.15598 ++ (set (match_dup 3)
1.15599 ++ (mem:SI (plus:SI (match_dup 1)
1.15600 ++ (match_dup 2))))]
1.15601 ++ "")
1.15602 ++
1.15603 ++;;=============================================================================
1.15604 ++;; Peephole optimizing
1.15605 ++;;-----------------------------------------------------------------------------
1.15606 ++;; Changing
1.15607 ++;; ld.sb r0, r7[-0x6]
1.15608 ++;; cashs.b r0
1.15609 ++;; to
1.15610 ++;; ld.sb r0, r7[-0x6]
1.15611 ++;;=============================================================================
1.15612 ++(define_peephole2
1.15613 ++ [(set (match_operand:QI 0 "register_operand" "")
1.15614 ++ (match_operand:QI 1 "load_sb_memory_operand" ""))
1.15615 ++ (set (match_operand:SI 2 "register_operand" "")
1.15616 ++ (sign_extend:SI (match_dup 0)))]
1.15617 ++ "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
1.15618 ++ [(set (match_dup 2)
1.15619 ++ (sign_extend:SI (match_dup 1)))]
1.15620 ++ "")
1.15621 ++
1.15622 ++;;=============================================================================
1.15623 ++;; Peephole optimizing
1.15624 ++;;-----------------------------------------------------------------------------
1.15625 ++;; Changing
1.15626 ++;; ld.ub r0, r7[-0x6]
1.15627 ++;; cashu.b r0
1.15628 ++;; to
1.15629 ++;; ld.ub r0, r7[-0x6]
1.15630 ++;;=============================================================================
1.15631 ++(define_peephole2
1.15632 ++ [(set (match_operand:QI 0 "register_operand" "")
1.15633 ++ (match_operand:QI 1 "memory_operand" ""))
1.15634 ++ (set (match_operand:SI 2 "register_operand" "")
1.15635 ++ (zero_extend:SI (match_dup 0)))]
1.15636 ++ "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
1.15637 ++ [(set (match_dup 2)
1.15638 ++ (zero_extend:SI (match_dup 1)))]
1.15639 ++ "")
1.15640 ++
1.15641 ++;;=============================================================================
1.15642 ++;; Peephole optimizing
1.15643 ++;;-----------------------------------------------------------------------------
1.15644 ++;; Changing
1.15645 ++;; ld.sh r0, r7[-0x6]
1.15646 ++;; casts.h r0
1.15647 ++;; to
1.15648 ++;; ld.sh r0, r7[-0x6]
1.15649 ++;;=============================================================================
1.15650 ++(define_peephole2
1.15651 ++ [(set (match_operand:HI 0 "register_operand" "")
1.15652 ++ (match_operand:HI 1 "memory_operand" ""))
1.15653 ++ (set (match_operand:SI 2 "register_operand" "")
1.15654 ++ (sign_extend:SI (match_dup 0)))]
1.15655 ++ "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
1.15656 ++ [(set (match_dup 2)
1.15657 ++ (sign_extend:SI (match_dup 1)))]
1.15658 ++ "")
1.15659 ++
1.15660 ++;;=============================================================================
1.15661 ++;; Peephole optimizing
1.15662 ++;;-----------------------------------------------------------------------------
1.15663 ++;; Changing
1.15664 ++;; ld.uh r0, r7[-0x6]
1.15665 ++;; castu.h r0
1.15666 ++;; to
1.15667 ++;; ld.uh r0, r7[-0x6]
1.15668 ++;;=============================================================================
1.15669 ++(define_peephole2
1.15670 ++ [(set (match_operand:HI 0 "register_operand" "")
1.15671 ++ (match_operand:HI 1 "memory_operand" ""))
1.15672 ++ (set (match_operand:SI 2 "register_operand" "")
1.15673 ++ (zero_extend:SI (match_dup 0)))]
1.15674 ++ "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
1.15675 ++ [(set (match_dup 2)
1.15676 ++ (zero_extend:SI (match_dup 1)))]
1.15677 ++ "")
1.15678 ++
1.15679 ++;;=============================================================================
1.15680 ++;; Peephole optimizing
1.15681 ++;;-----------------------------------------------------------------------------
1.15682 ++;; Changing
1.15683 ++;; mul rd, rx, ry
1.15684 ++;; add rd2, rd
1.15685 ++;; or
1.15686 ++;; add rd2, rd, rd2
1.15687 ++;; to
1.15688 ++;; mac rd2, rx, ry
1.15689 ++;;=============================================================================
1.15690 ++(define_peephole2
1.15691 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15692 ++ (mult:SI (match_operand:SI 1 "register_operand" "")
1.15693 ++ (match_operand:SI 2 "register_operand" "")))
1.15694 ++ (set (match_operand:SI 3 "register_operand" "")
1.15695 ++ (plus:SI (match_dup 3)
1.15696 ++ (match_dup 0)))]
1.15697 ++ "peep2_reg_dead_p(2, operands[0])"
1.15698 ++ [(set (match_dup 3)
1.15699 ++ (plus:SI (mult:SI (match_dup 1)
1.15700 ++ (match_dup 2))
1.15701 ++ (match_dup 3)))]
1.15702 ++ "")
1.15703 ++
1.15704 ++(define_peephole2
1.15705 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15706 ++ (mult:SI (match_operand:SI 1 "register_operand" "")
1.15707 ++ (match_operand:SI 2 "register_operand" "")))
1.15708 ++ (set (match_operand:SI 3 "register_operand" "")
1.15709 ++ (plus:SI (match_dup 0)
1.15710 ++ (match_dup 3)))]
1.15711 ++ "peep2_reg_dead_p(2, operands[0])"
1.15712 ++ [(set (match_dup 3)
1.15713 ++ (plus:SI (mult:SI (match_dup 1)
1.15714 ++ (match_dup 2))
1.15715 ++ (match_dup 3)))]
1.15716 ++ "")
1.15717 ++
1.15718 ++
1.15719 ++;;=============================================================================
1.15720 ++;; Peephole optimizing
1.15721 ++;;-----------------------------------------------------------------------------
1.15722 ++;; Changing
1.15723 ++;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask
1.15724 ++;; to
1.15725 ++;; bld rs, k5
1.15726 ++;;
1.15727 ++;; If rd is dead after the operation.
1.15728 ++;;=============================================================================
1.15729 ++(define_peephole2
1.15730 ++ [ (set (match_operand:SI 0 "register_operand" "")
1.15731 ++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
1.15732 ++ (const_int 1)
1.15733 ++ (match_operand:SI 2 "immediate_operand" "")))
1.15734 ++ (set (cc0)
1.15735 ++ (match_dup 0))]
1.15736 ++ "peep2_reg_dead_p(2, operands[0])"
1.15737 ++ [(set (cc0)
1.15738 ++ (and:SI (match_dup 1)
1.15739 ++ (match_dup 2)))]
1.15740 ++ "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
1.15741 ++
1.15742 ++(define_peephole2
1.15743 ++ [ (set (match_operand:SI 0 "register_operand" "")
1.15744 ++ (and:SI (match_operand:SI 1 "register_operand" "")
1.15745 ++ (match_operand:SI 2 "one_bit_set_operand" "")))
1.15746 ++ (set (cc0)
1.15747 ++ (match_dup 0))]
1.15748 ++ "peep2_reg_dead_p(2, operands[0])"
1.15749 ++ [(set (cc0)
1.15750 ++ (and:SI (match_dup 1)
1.15751 ++ (match_dup 2)))]
1.15752 ++ "")
1.15753 ++
1.15754 ++;;=============================================================================
1.15755 ++;; Peephole optimizing
1.15756 ++;;-----------------------------------------------------------------------------
1.15757 ++;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2]
1.15758 ++;;
1.15759 ++;;=============================================================================
1.15760 ++
1.15761 ++
1.15762 ++(define_peephole
1.15763 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15764 ++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
1.15765 ++ (const_int 8)
1.15766 ++ (match_operand:SI 2 "avr32_extract_shift_operand" "")))
1.15767 ++ (set (match_operand:SI 3 "register_operand" "")
1.15768 ++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
1.15769 ++ (match_operand:SI 4 "register_operand" ""))))]
1.15770 ++
1.15771 ++ "(dead_or_set_p(insn, operands[0]))"
1.15772 ++ {
1.15773 ++ switch ( INTVAL(operands[2]) ){
1.15774 ++ case 0:
1.15775 ++ return "ld.w %3, %4[%1:b << 2]";
1.15776 ++ case 8:
1.15777 ++ return "ld.w %3, %4[%1:l << 2]";
1.15778 ++ case 16:
1.15779 ++ return "ld.w %3, %4[%1:u << 2]";
1.15780 ++ case 24:
1.15781 ++ return "ld.w %3, %4[%1:t << 2]";
1.15782 ++ default:
1.15783 ++ internal_error("illegal operand for ldxi");
1.15784 ++ }
1.15785 ++ }
1.15786 ++ [(set_attr "type" "load")
1.15787 ++ (set_attr "length" "4")
1.15788 ++ (set_attr "cc" "clobber")]
1.15789 ++ )
1.15790 ++
1.15791 ++
1.15792 ++
1.15793 ++(define_peephole
1.15794 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15795 ++ (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
1.15796 ++ (set (match_operand:SI 2 "register_operand" "")
1.15797 ++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
1.15798 ++ (match_operand:SI 3 "register_operand" ""))))]
1.15799 ++
1.15800 ++ "(dead_or_set_p(insn, operands[0]))"
1.15801 ++
1.15802 ++ "ld.w %2, %3[%1:b << 2]"
1.15803 ++ [(set_attr "type" "load")
1.15804 ++ (set_attr "length" "4")
1.15805 ++ (set_attr "cc" "clobber")]
1.15806 ++ )
1.15807 ++
1.15808 ++
1.15809 ++(define_peephole2
1.15810 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15811 ++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
1.15812 ++ (const_int 8)
1.15813 ++ (match_operand:SI 2 "avr32_extract_shift_operand" "")))
1.15814 ++ (set (match_operand:SI 3 "register_operand" "")
1.15815 ++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
1.15816 ++ (match_operand:SI 4 "register_operand" ""))))]
1.15817 ++
1.15818 ++ "(peep2_reg_dead_p(2, operands[0]))
1.15819 ++ || (REGNO(operands[0]) == REGNO(operands[3]))"
1.15820 ++ [(set (match_dup 3)
1.15821 ++ (mem:SI (plus:SI
1.15822 ++ (match_dup 4)
1.15823 ++ (mult:SI (zero_extract:SI (match_dup 1)
1.15824 ++ (const_int 8)
1.15825 ++ (match_dup 2))
1.15826 ++ (const_int 4)))))]
1.15827 ++ )
1.15828 ++
1.15829 ++(define_peephole2
1.15830 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15831 ++ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
1.15832 ++ (set (match_operand:SI 2 "register_operand" "")
1.15833 ++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
1.15834 ++ (match_operand:SI 3 "register_operand" ""))))]
1.15835 ++
1.15836 ++ "(peep2_reg_dead_p(2, operands[0]))
1.15837 ++ || (REGNO(operands[0]) == REGNO(operands[2]))"
1.15838 ++ [(set (match_dup 2)
1.15839 ++ (mem:SI (plus:SI
1.15840 ++ (match_dup 3)
1.15841 ++ (mult:SI (zero_extract:SI (match_dup 1)
1.15842 ++ (const_int 8)
1.15843 ++ (const_int 0))
1.15844 ++ (const_int 4)))))]
1.15845 ++ "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
1.15846 ++ )
1.15847 ++
1.15848 ++
1.15849 ++(define_peephole2
1.15850 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15851 ++ (and:SI (match_operand:SI 1 "register_operand" "")
1.15852 ++ (const_int 255)))
1.15853 ++ (set (match_operand:SI 2 "register_operand" "")
1.15854 ++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
1.15855 ++ (match_operand:SI 3 "register_operand" ""))))]
1.15856 ++
1.15857 ++ "(peep2_reg_dead_p(2, operands[0]))
1.15858 ++ || (REGNO(operands[0]) == REGNO(operands[2]))"
1.15859 ++ [(set (match_dup 2)
1.15860 ++ (mem:SI (plus:SI
1.15861 ++ (match_dup 3)
1.15862 ++ (mult:SI (zero_extract:SI (match_dup 1)
1.15863 ++ (const_int 8)
1.15864 ++ (const_int 0))
1.15865 ++ (const_int 4)))))]
1.15866 ++ ""
1.15867 ++ )
1.15868 ++
1.15869 ++
1.15870 ++
1.15871 ++(define_peephole2
1.15872 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15873 ++ (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
1.15874 ++ (const_int 24)))
1.15875 ++ (set (match_operand:SI 2 "register_operand" "")
1.15876 ++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
1.15877 ++ (match_operand:SI 3 "register_operand" ""))))]
1.15878 ++
1.15879 ++ "(peep2_reg_dead_p(2, operands[0]))
1.15880 ++ || (REGNO(operands[0]) == REGNO(operands[2]))"
1.15881 ++ [(set (match_dup 2)
1.15882 ++ (mem:SI (plus:SI
1.15883 ++ (match_dup 3)
1.15884 ++ (mult:SI (zero_extract:SI (match_dup 1)
1.15885 ++ (const_int 8)
1.15886 ++ (const_int 24))
1.15887 ++ (const_int 4)))))]
1.15888 ++ ""
1.15889 ++ )
1.15890 ++
1.15891 ++
1.15892 ++;;************************************************
1.15893 ++;; ANDN
1.15894 ++;;
1.15895 ++;;************************************************
1.15896 ++
1.15897 ++
1.15898 ++(define_peephole2
1.15899 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15900 ++ (not:SI (match_operand:SI 1 "register_operand" "")))
1.15901 ++ (set (match_operand:SI 2 "register_operand" "")
1.15902 ++ (and:SI (match_dup 2)
1.15903 ++ (match_dup 0)))]
1.15904 ++ "peep2_reg_dead_p(2, operands[0])"
1.15905 ++
1.15906 ++ [(set (match_dup 2)
1.15907 ++ (and:SI (match_dup 2)
1.15908 ++ (not:SI (match_dup 1))
1.15909 ++ ))]
1.15910 ++ ""
1.15911 ++)
1.15912 ++
1.15913 ++(define_peephole2
1.15914 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15915 ++ (not:SI (match_operand:SI 1 "register_operand" "")))
1.15916 ++ (set (match_operand:SI 2 "register_operand" "")
1.15917 ++ (and:SI (match_dup 0)
1.15918 ++ (match_dup 2)
1.15919 ++ ))]
1.15920 ++ "peep2_reg_dead_p(2, operands[0])"
1.15921 ++
1.15922 ++ [(set (match_dup 2)
1.15923 ++ (and:SI (match_dup 2)
1.15924 ++ (not:SI (match_dup 1))
1.15925 ++ ))]
1.15926 ++
1.15927 ++ ""
1.15928 ++)
1.15929 ++
1.15930 ++
1.15931 ++;;=================================================================
1.15932 ++;; Addabs peephole
1.15933 ++;;=================================================================
1.15934 ++
1.15935 ++(define_peephole
1.15936 ++ [(set (match_operand:SI 2 "register_operand" "=r")
1.15937 ++ (abs:SI (match_operand:SI 1 "register_operand" "r")))
1.15938 ++ (set (match_operand:SI 0 "register_operand" "=r")
1.15939 ++ (plus:SI (match_operand:SI 3 "register_operand" "r")
1.15940 ++ (match_dup 2)))]
1.15941 ++ "dead_or_set_p(insn, operands[2])"
1.15942 ++ "addabs %0, %3, %1"
1.15943 ++ [(set_attr "length" "4")
1.15944 ++ (set_attr "cc" "set_z")])
1.15945 ++
1.15946 ++(define_peephole
1.15947 ++ [(set (match_operand:SI 2 "register_operand" "=r")
1.15948 ++ (abs:SI (match_operand:SI 1 "register_operand" "r")))
1.15949 ++ (set (match_operand:SI 0 "register_operand" "=r")
1.15950 ++ (plus:SI (match_dup 2)
1.15951 ++ (match_operand:SI 3 "register_operand" "r")))]
1.15952 ++ "dead_or_set_p(insn, operands[2])"
1.15953 ++ "addabs %0, %3, %1"
1.15954 ++ [(set_attr "length" "4")
1.15955 ++ (set_attr "cc" "set_z")])
1.15956 ++
1.15957 ++
1.15958 ++;;=================================================================
1.15959 ++;; Detect roundings
1.15960 ++;;=================================================================
1.15961 ++
1.15962 ++(define_insn "*round"
1.15963 ++ [(set (match_operand:SI 0 "register_operand" "+r")
1.15964 ++ (ashiftrt:SI (plus:SI (match_dup 0)
1.15965 ++ (match_operand:SI 1 "immediate_operand" "i"))
1.15966 ++ (match_operand:SI 2 "immediate_operand" "i")))]
1.15967 ++ "avr32_rnd_operands(operands[1], operands[2])"
1.15968 ++
1.15969 ++ "satrnds %0 >> %2, 31"
1.15970 ++
1.15971 ++ [(set_attr "type" "alu_sat")
1.15972 ++ (set_attr "length" "4")]
1.15973 ++
1.15974 ++ )
1.15975 ++
1.15976 ++
1.15977 ++(define_peephole2
1.15978 ++ [(set (match_operand:SI 0 "register_operand" "")
1.15979 ++ (plus:SI (match_dup 0)
1.15980 ++ (match_operand:SI 1 "immediate_operand" "")))
1.15981 ++ (set (match_dup 0)
1.15982 ++ (ashiftrt:SI (match_dup 0)
1.15983 ++ (match_operand:SI 2 "immediate_operand" "")))]
1.15984 ++ "avr32_rnd_operands(operands[1], operands[2])"
1.15985 ++
1.15986 ++ [(set (match_dup 0)
1.15987 ++ (ashiftrt:SI (plus:SI (match_dup 0)
1.15988 ++ (match_dup 1))
1.15989 ++ (match_dup 2)))]
1.15990 ++ )
1.15991 ++
1.15992 ++(define_peephole
1.15993 ++ [(set (match_operand:SI 0 "register_operand" "r")
1.15994 ++ (plus:SI (match_dup 0)
1.15995 ++ (match_operand:SI 1 "immediate_operand" "i")))
1.15996 ++ (set (match_dup 0)
1.15997 ++ (ashiftrt:SI (match_dup 0)
1.15998 ++ (match_operand:SI 2 "immediate_operand" "i")))]
1.15999 ++ "avr32_rnd_operands(operands[1], operands[2])"
1.16000 ++
1.16001 ++ "satrnds %0 >> %2, 31"
1.16002 ++
1.16003 ++ [(set_attr "type" "alu_sat")
1.16004 ++ (set_attr "length" "4")
1.16005 ++ (set_attr "cc" "clobber")]
1.16006 ++
1.16007 ++ )
1.16008 ++
1.16009 ++
1.16010 ++;;=================================================================
1.16011 ++;; mcall
1.16012 ++;;=================================================================
1.16013 ++(define_peephole
1.16014 ++ [(set (match_operand:SI 0 "register_operand" "")
1.16015 ++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
1.16016 ++ (parallel [(call (mem:SI (match_dup 0))
1.16017 ++ (match_operand 2 "" ""))
1.16018 ++ (clobber (reg:SI LR_REGNUM))])]
1.16019 ++ "dead_or_set_p(insn, operands[0])"
1.16020 ++ "mcall %1"
1.16021 ++ [(set_attr "type" "call")
1.16022 ++ (set_attr "length" "4")
1.16023 ++ (set_attr "cc" "clobber")]
1.16024 ++)
1.16025 ++
1.16026 ++(define_peephole
1.16027 ++ [(set (match_operand:SI 2 "register_operand" "")
1.16028 ++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
1.16029 ++ (parallel [(set (match_operand 0 "register_operand" "")
1.16030 ++ (call (mem:SI (match_dup 2))
1.16031 ++ (match_operand 3 "" "")))
1.16032 ++ (clobber (reg:SI LR_REGNUM))])]
1.16033 ++ "dead_or_set_p(insn, operands[2])"
1.16034 ++ "mcall %1"
1.16035 ++ [(set_attr "type" "call")
1.16036 ++ (set_attr "length" "4")
1.16037 ++ (set_attr "cc" "call_set")]
1.16038 ++)
1.16039 ++
1.16040 ++
1.16041 ++(define_peephole2
1.16042 ++ [(set (match_operand:SI 0 "register_operand" "")
1.16043 ++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
1.16044 ++ (parallel [(call (mem:SI (match_dup 0))
1.16045 ++ (match_operand 2 "" ""))
1.16046 ++ (clobber (reg:SI LR_REGNUM))])]
1.16047 ++ "peep2_reg_dead_p(2, operands[0])"
1.16048 ++ [(parallel [(call (mem:SI (match_dup 1))
1.16049 ++ (match_dup 2))
1.16050 ++ (clobber (reg:SI LR_REGNUM))])]
1.16051 ++ ""
1.16052 ++)
1.16053 ++
1.16054 ++(define_peephole2
1.16055 ++ [(set (match_operand:SI 0 "register_operand" "")
1.16056 ++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
1.16057 ++ (parallel [(set (match_operand 2 "register_operand" "")
1.16058 ++ (call (mem:SI (match_dup 0))
1.16059 ++ (match_operand 3 "" "")))
1.16060 ++ (clobber (reg:SI LR_REGNUM))])]
1.16061 ++ "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
1.16062 ++ [(parallel [(set (match_dup 2)
1.16063 ++ (call (mem:SI (match_dup 1))
1.16064 ++ (match_dup 3)))
1.16065 ++ (clobber (reg:SI LR_REGNUM))])]
1.16066 ++ ""
1.16067 ++)
1.16068 ++
1.16069 ++;;=================================================================
1.16070 ++;; Returning a value
1.16071 ++;;=================================================================
1.16072 ++
1.16073 ++
1.16074 ++(define_peephole
1.16075 ++ [(set (match_operand 0 "register_operand" "")
1.16076 ++ (match_operand 1 "register_operand" ""))
1.16077 ++ (return)]
1.16078 ++ "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
1.16079 ++ && (REGNO(operands[1]) != LR_REGNUM)
1.16080 ++ && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
1.16081 ++ "retal %1"
1.16082 ++ [(set_attr "type" "call")
1.16083 ++ (set_attr "length" "2")]
1.16084 ++ )
1.16085 ++
1.16086 ++
1.16087 ++(define_peephole
1.16088 ++ [(set (match_operand 0 "register_operand" "r")
1.16089 ++ (match_operand 1 "immediate_operand" "i"))
1.16090 ++ (return)]
1.16091 ++ "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
1.16092 ++ ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
1.16093 ++ {
1.16094 ++ avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
1.16095 ++ return "";
1.16096 ++ }
1.16097 ++ [(set_attr "type" "call")
1.16098 ++ (set_attr "length" "4")]
1.16099 ++ )
1.16100 ++
1.16101 ++(define_peephole
1.16102 ++ [(set (match_operand 0 "register_operand" "r")
1.16103 ++ (match_operand 1 "immediate_operand" "i"))
1.16104 ++ (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
1.16105 ++ "(REGNO(operands[0]) == RETVAL_REGNUM) &&
1.16106 ++ ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
1.16107 ++ {
1.16108 ++ avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
1.16109 ++ return "";
1.16110 ++ }
1.16111 ++ ; Length is absolute worst case
1.16112 ++ [(set_attr "type" "branch")
1.16113 ++ (set_attr "length" "12")]
1.16114 ++ )
1.16115 ++
1.16116 ++(define_peephole
1.16117 ++ [(set (match_operand 0 "register_operand" "=r")
1.16118 ++ (if_then_else (match_operator 1 "avr32_comparison_operator"
1.16119 ++ [(match_operand 4 "register_operand" "r")
1.16120 ++ (match_operand 5 "register_immediate_operand" "rKs21")])
1.16121 ++ (match_operand 2 "avr32_cond_register_immediate_operand" "rKs08")
1.16122 ++ (match_operand 3 "avr32_cond_register_immediate_operand" "rKs08")))
1.16123 ++ (return)]
1.16124 ++ "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)"
1.16125 ++ {
1.16126 ++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
1.16127 ++
1.16128 ++ if ( GET_CODE(operands[2]) == REG
1.16129 ++ && GET_CODE(operands[3]) == REG
1.16130 ++ && REGNO(operands[2]) != LR_REGNUM
1.16131 ++ && REGNO(operands[3]) != LR_REGNUM ){
1.16132 ++ return "ret%1 %2\;ret%i1 %3";
1.16133 ++ } else if ( GET_CODE(operands[2]) == REG
1.16134 ++ && GET_CODE(operands[3]) == CONST_INT ){
1.16135 ++ if ( INTVAL(operands[3]) == -1
1.16136 ++ || INTVAL(operands[3]) == 0
1.16137 ++ || INTVAL(operands[3]) == 1 ){
1.16138 ++ return "ret%1 %2\;ret%i1 %d3";
1.16139 ++ } else {
1.16140 ++ return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
1.16141 ++ }
1.16142 ++ } else if ( GET_CODE(operands[2]) == CONST_INT
1.16143 ++ && GET_CODE(operands[3]) == REG ){
1.16144 ++ if ( INTVAL(operands[2]) == -1
1.16145 ++ || INTVAL(operands[2]) == 0
1.16146 ++ || INTVAL(operands[2]) == 1 ){
1.16147 ++ return "ret%1 %d2\;ret%i1 %3";
1.16148 ++ } else {
1.16149 ++ return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
1.16150 ++ }
1.16151 ++ } else {
1.16152 ++ if ( (INTVAL(operands[2]) == -1
1.16153 ++ || INTVAL(operands[2]) == 0
1.16154 ++ || INTVAL(operands[2]) == 1 )
1.16155 ++ && (INTVAL(operands[3]) == -1
1.16156 ++ || INTVAL(operands[3]) == 0
1.16157 ++ || INTVAL(operands[3]) == 1 )){
1.16158 ++ return "ret%1 %d2\;ret%i1 %d3";
1.16159 ++ } else {
1.16160 ++ return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
1.16161 ++ }
1.16162 ++ }
1.16163 ++ }
1.16164 ++
1.16165 ++ [(set_attr "length" "10")
1.16166 ++ (set_attr "cc" "none")
1.16167 ++ (set_attr "type" "call")])
1.16168 ++
1.16169 ++
1.16170 ++
1.16171 ++;;=================================================================
1.16172 ++;; mulnhh.w
1.16173 ++;;=================================================================
1.16174 ++
1.16175 ++(define_peephole2
1.16176 ++ [(set (match_operand:HI 0 "register_operand" "")
1.16177 ++ (neg:HI (match_operand:HI 1 "register_operand" "")))
1.16178 ++ (set (match_operand:SI 2 "register_operand" "")
1.16179 ++ (mult:SI
1.16180 ++ (sign_extend:SI (match_dup 0))
1.16181 ++ (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
1.16182 ++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
1.16183 ++ [ (set (match_dup 2)
1.16184 ++ (mult:SI
1.16185 ++ (sign_extend:SI (neg:HI (match_dup 1)))
1.16186 ++ (sign_extend:SI (match_dup 3))))]
1.16187 ++ ""
1.16188 ++ )
1.16189 ++
1.16190 ++(define_peephole2
1.16191 ++ [(set (match_operand:HI 0 "register_operand" "")
1.16192 ++ (neg:HI (match_operand:HI 1 "register_operand" "")))
1.16193 ++ (set (match_operand:SI 2 "register_operand" "")
1.16194 ++ (mult:SI
1.16195 ++ (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
1.16196 ++ (sign_extend:SI (match_dup 0))))]
1.16197 ++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
1.16198 ++ [ (set (match_dup 2)
1.16199 ++ (mult:SI
1.16200 ++ (sign_extend:SI (neg:HI (match_dup 1)))
1.16201 ++ (sign_extend:SI (match_dup 3))))]
1.16202 ++ ""
1.16203 ++ )
1.16204 ++
1.16205 ++
1.16206 ++
1.16207 ++;;=================================================================
1.16208 ++;; Vector set and extract operations
1.16209 ++;;=================================================================
1.16210 ++(define_insn "vec_setv2hi_hi"
1.16211 ++ [(set (match_operand:V2HI 0 "register_operand" "=r")
1.16212 ++ (vec_merge:V2HI
1.16213 ++ (match_dup 0)
1.16214 ++ (vec_duplicate:V2HI
1.16215 ++ (match_operand:HI 1 "register_operand" "r"))
1.16216 ++ (const_int 1)))]
1.16217 ++ ""
1.16218 ++ "bfins\t%0, %1, 16, 16"
1.16219 ++ [(set_attr "type" "alu")
1.16220 ++ (set_attr "length" "4")
1.16221 ++ (set_attr "cc" "clobber")])
1.16222 ++
1.16223 ++(define_insn "vec_setv2hi_lo"
1.16224 ++ [(set (match_operand:V2HI 0 "register_operand" "+r")
1.16225 ++ (vec_merge:V2HI
1.16226 ++ (match_dup 0)
1.16227 ++ (vec_duplicate:V2HI
1.16228 ++ (match_operand:HI 1 "register_operand" "r"))
1.16229 ++ (const_int 2)))]
1.16230 ++ ""
1.16231 ++ "bfins\t%0, %1, 0, 16"
1.16232 ++ [(set_attr "type" "alu")
1.16233 ++ (set_attr "length" "4")
1.16234 ++ (set_attr "cc" "clobber")])
1.16235 ++
1.16236 ++(define_expand "vec_setv2hi"
1.16237 ++ [(set (match_operand:V2HI 0 "register_operand" "")
1.16238 ++ (vec_merge:V2HI
1.16239 ++ (match_dup 0)
1.16240 ++ (vec_duplicate:V2HI
1.16241 ++ (match_operand:HI 1 "register_operand" ""))
1.16242 ++ (match_operand 2 "immediate_operand" "")))]
1.16243 ++ ""
1.16244 ++ { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
1.16245 ++ )
1.16246 ++
1.16247 ++(define_insn "vec_extractv2hi"
1.16248 ++ [(set (match_operand:HI 0 "register_operand" "=r")
1.16249 ++ (vec_select:HI
1.16250 ++ (match_operand:V2HI 1 "register_operand" "r")
1.16251 ++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
1.16252 ++ ""
1.16253 ++ {
1.16254 ++ if ( INTVAL(operands[2]) == 0 )
1.16255 ++ return "bfextu\t%0, %1, 16, 16";
1.16256 ++ else
1.16257 ++ return "bfextu\t%0, %1, 0, 16";
1.16258 ++ }
1.16259 ++ [(set_attr "type" "alu")
1.16260 ++ (set_attr "length" "4")
1.16261 ++ (set_attr "cc" "clobber")])
1.16262 ++
1.16263 ++(define_insn "vec_extractv4qi"
1.16264 ++ [(set (match_operand:QI 0 "register_operand" "=r")
1.16265 ++ (vec_select:QI
1.16266 ++ (match_operand:V4QI 1 "register_operand" "r")
1.16267 ++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
1.16268 ++ ""
1.16269 ++ {
1.16270 ++ switch ( INTVAL(operands[2]) ){
1.16271 ++ case 0:
1.16272 ++ return "bfextu\t%0, %1, 24, 8";
1.16273 ++ case 1:
1.16274 ++ return "bfextu\t%0, %1, 16, 8";
1.16275 ++ case 2:
1.16276 ++ return "bfextu\t%0, %1, 8, 8";
1.16277 ++ case 3:
1.16278 ++ return "bfextu\t%0, %1, 0, 8";
1.16279 ++ default:
1.16280 ++ abort();
1.16281 ++ }
1.16282 ++ }
1.16283 ++ [(set_attr "type" "alu")
1.16284 ++ (set_attr "length" "4")
1.16285 ++ (set_attr "cc" "clobber")])
1.16286 ++
1.16287 ++
1.16288 ++(define_insn "concatv2hi"
1.16289 ++ [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
1.16290 ++ (vec_concat:V2HI
1.16291 ++ (match_operand:HI 1 "register_operand" "r, r, 0")
1.16292 ++ (match_operand:HI 2 "register_operand" "r, 0, r")))]
1.16293 ++ ""
1.16294 ++ "@
1.16295 ++ mov\t%0, %1\;bfins\t%0, %2, 0, 16
1.16296 ++ bfins\t%0, %2, 0, 16
1.16297 ++ bfins\t%0, %1, 16, 16"
1.16298 ++ [(set_attr "length" "6, 4, 4")
1.16299 ++ (set_attr "type" "alu")])
1.16300 ++
1.16301 ++
1.16302 ++;; Load the atomic operation description
1.16303 ++(include "sync.md")
1.16304 ++
1.16305 ++;; Load the SIMD description
1.16306 ++(include "simd.md")
1.16307 ++
1.16308 ++;; Load the FP coprocessor patterns
1.16309 ++(include "fpcp.md")
1.16310 +--- /dev/null
1.16311 ++++ b/gcc/config/avr32/avr32-modes.def
1.16312 +@@ -0,0 +1 @@
1.16313 ++VECTOR_MODES (INT, 4); /* V4QI V2HI */
1.16314 +--- /dev/null
1.16315 ++++ b/gcc/config/avr32/avr32.opt
1.16316 +@@ -0,0 +1,86 @@
1.16317 ++; Options for the ATMEL AVR32 port of the compiler.
1.16318 ++
1.16319 ++; Copyright 2007 Atmel Corporation.
1.16320 ++;
1.16321 ++; This file is part of GCC.
1.16322 ++;
1.16323 ++; GCC is free software; you can redistribute it and/or modify it under
1.16324 ++; the terms of the GNU General Public License as published by the Free
1.16325 ++; Software Foundation; either version 2, or (at your option) any later
1.16326 ++; version.
1.16327 ++;
1.16328 ++; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
1.16329 ++; WARRANTY; without even the implied warranty of MERCHANTABILITY or
1.16330 ++; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
1.16331 ++; for more details.
1.16332 ++;
1.16333 ++; You should have received a copy of the GNU General Public License
1.16334 ++; along with GCC; see the file COPYING. If not, write to the Free
1.16335 ++; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
1.16336 ++; 02110-1301, USA.
1.16337 ++
1.16338 ++muse-rodata-section
1.16339 ++Target Report Mask(USE_RODATA_SECTION)
1.16340 ++Use section .rodata for read-only data instead of .text.
1.16341 ++
1.16342 ++mhard-float
1.16343 ++Target Report Undocumented Mask(HARD_FLOAT)
1.16344 ++Use floating point coprocessor instructions.
1.16345 ++
1.16346 ++msoft-float
1.16347 ++Target Report Undocumented InverseMask(HARD_FLOAT, SOFT_FLOAT)
1.16348 ++Use software floating-point library for floating-point operations.
1.16349 ++
1.16350 ++mforce-double-align
1.16351 ++Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
1.16352 ++Force double-word alignment for double-word memory accesses.
1.16353 ++
1.16354 ++mno-init-got
1.16355 ++Target Report RejectNegative Mask(NO_INIT_GOT)
1.16356 ++Do not initialize GOT register before using it when compiling PIC code.
1.16357 ++
1.16358 ++mrelax
1.16359 ++Target Report Mask(RELAX)
1.16360 ++Let invoked assembler and linker do relaxing (Enabled by default when optimization level is >1).
1.16361 ++
1.16362 ++mmd-reorg-opt
1.16363 ++Target Report Undocumented Mask(MD_REORG_OPTIMIZATION)
1.16364 ++Perform machine dependent optimizations in reorg stage.
1.16365 ++
1.16366 ++masm-addr-pseudos
1.16367 ++Target Report Mask(HAS_ASM_ADDR_PSEUDOS)
1.16368 ++Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default)
1.16369 ++
1.16370 ++mpart=
1.16371 ++Target Report RejectNegative Joined Var(avr32_part_name)
1.16372 ++Specify the AVR32 part name
1.16373 ++
1.16374 ++mcpu=
1.16375 ++Target Report RejectNegative Joined Undocumented Var(avr32_part_name)
1.16376 ++Specify the AVR32 part name (deprecated)
1.16377 ++
1.16378 ++march=
1.16379 ++Target Report RejectNegative Joined Var(avr32_arch_name)
1.16380 ++Specify the AVR32 architecture name
1.16381 ++
1.16382 ++mfast-float
1.16383 ++Target Report Mask(FAST_FLOAT)
1.16384 ++Enable fast floating-point library. Enabled by default if the -funsafe-math-optimizations switch is specified.
1.16385 ++
1.16386 ++mimm-in-const-pool
1.16387 ++Target Report Var(avr32_imm_in_const_pool) Init(-1)
1.16388 ++Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
1.16389 ++
1.16390 ++mno-pic
1.16391 ++Target Report RejectNegative Mask(NO_PIC)
1.16392 ++Do not generate position-independent code. (deprecated, use -fno-pic instead)
1.16393 ++
1.16394 ++mcond-exec-before-reload
1.16395 ++Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD)
1.16396 ++Enable experimental conditional execution preparation before the reload stage.
1.16397 ++
1.16398 ++mrmw-addressable-data
1.16399 ++Target Report Mask(RMW_ADDRESSABLE_DATA)
1.16400 ++Signal that all data is in range for the Atomic Read-Modify-Write memory instructions, and that
1.16401 ++gcc can safely generate these whenever possible.
1.16402 ++
1.16403 +--- /dev/null
1.16404 ++++ b/gcc/config/avr32/avr32-protos.h
1.16405 +@@ -0,0 +1,196 @@
1.16406 ++/*
1.16407 ++ Prototypes for exported functions defined in avr32.c
1.16408 ++ Copyright 2003-2006 Atmel Corporation.
1.16409 ++
1.16410 ++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.16411 ++   Initial porting by Anders Ødland.
1.16412 ++
1.16413 ++ This file is part of GCC.
1.16414 ++
1.16415 ++ This program is free software; you can redistribute it and/or modify
1.16416 ++ it under the terms of the GNU General Public License as published by
1.16417 ++ the Free Software Foundation; either version 2 of the License, or
1.16418 ++ (at your option) any later version.
1.16419 ++
1.16420 ++ This program is distributed in the hope that it will be useful,
1.16421 ++ but WITHOUT ANY WARRANTY; without even the implied warranty of
1.16422 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.16423 ++ GNU General Public License for more details.
1.16424 ++
1.16425 ++ You should have received a copy of the GNU General Public License
1.16426 ++ along with this program; if not, write to the Free Software
1.16427 ++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
1.16428 ++
1.16429 ++
1.16430 ++#ifndef AVR32_PROTOS_H
1.16431 ++#define AVR32_PROTOS_H
1.16432 ++
1.16433 ++extern const int swap_reg[];
1.16434 ++
1.16435 ++extern int avr32_valid_macmac_bypass (rtx, rtx);
1.16436 ++extern int avr32_valid_mulmac_bypass (rtx, rtx);
1.16437 ++
1.16438 ++extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
1.16439 ++extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
1.16440 ++
1.16441 ++extern const char *avr32_strip_name_encoding (const char *);
1.16442 ++
1.16443 ++extern rtx avr32_get_note_reg_equiv (rtx insn);
1.16444 ++
1.16445 ++extern int avr32_use_return_insn (int iscond);
1.16446 ++
1.16447 ++extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
1.16448 ++
1.16449 ++extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
1.16450 ++extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
1.16451 ++extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
1.16452 ++
1.16453 ++extern void avr32_output_return_instruction (int single_ret_inst,
1.16454 ++ int iscond, rtx cond,
1.16455 ++ rtx r12_imm);
1.16456 ++extern void avr32_expand_prologue (void);
1.16457 ++extern void avr32_set_return_address (rtx source, rtx scratch);
1.16458 ++
1.16459 ++extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
1.16460 ++extern int avr32_extra_constraint_s (rtx value, const int strict);
1.16461 ++extern int avr32_eh_return_data_regno (const int n);
1.16462 ++extern int avr32_initial_elimination_offset (const int from, const int to);
1.16463 ++extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
1.16464 ++ tree type, int named);
1.16465 ++extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
1.16466 ++ rtx libname, tree fndecl);
1.16467 ++extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
1.16468 ++ enum machine_mode mode,
1.16469 ++ tree type, int named);
1.16470 ++#ifdef ARGS_SIZE_RTX
1.16471 ++/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */
1.16472 ++extern enum direction avr32_function_arg_padding (enum machine_mode mode,
1.16473 ++ tree type);
1.16474 ++#endif /* ARGS_SIZE_RTX */
1.16475 ++extern rtx avr32_function_value (tree valtype, tree func, bool outgoing);
1.16476 ++extern rtx avr32_libcall_value (enum machine_mode mode);
1.16477 ++extern int avr32_sched_use_dfa_pipeline_interface (void);
1.16478 ++extern bool avr32_return_in_memory (tree type, tree fntype);
1.16479 ++extern void avr32_regs_to_save (char *operand);
1.16480 ++extern void avr32_target_asm_function_prologue (FILE * file,
1.16481 ++ HOST_WIDE_INT size);
1.16482 ++extern void avr32_target_asm_function_epilogue (FILE * file,
1.16483 ++ HOST_WIDE_INT size);
1.16484 ++extern void avr32_trampoline_template (FILE * file);
1.16485 ++extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
1.16486 ++ rtx static_chain);
1.16487 ++extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
1.16488 ++ int strict);
1.16489 ++extern int avr32_legitimate_constant_p (rtx x);
1.16490 ++
1.16491 ++extern int avr32_legitimate_pic_operand_p (rtx x);
1.16492 ++
1.16493 ++extern rtx avr32_find_symbol (rtx x);
1.16494 ++extern void avr32_select_section (rtx exp, int reloc, int align);
1.16495 ++extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
1.16496 ++extern void avr32_asm_file_end (FILE * stream);
1.16497 ++extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
1.16498 ++extern void avr32_asm_output_common (FILE * stream, const char *name,
1.16499 ++ int size, int rounded);
1.16500 ++extern void avr32_asm_output_label (FILE * stream, const char *name);
1.16501 ++extern void avr32_asm_declare_object_name (FILE * stream, char *name,
1.16502 ++ tree decl);
1.16503 ++extern void avr32_asm_globalize_label (FILE * stream, const char *name);
1.16504 ++extern void avr32_asm_weaken_label (FILE * stream, const char *name);
1.16505 ++extern void avr32_asm_output_external (FILE * stream, tree decl,
1.16506 ++ const char *name);
1.16507 ++extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
1.16508 ++extern void avr32_asm_output_labelref (FILE * stream, const char *name);
1.16509 ++extern void avr32_notice_update_cc (rtx exp, rtx insn);
1.16510 ++extern void avr32_print_operand (FILE * stream, rtx x, int code);
1.16511 ++extern void avr32_print_operand_address (FILE * stream, rtx x);
1.16512 ++
1.16513 ++extern int avr32_symbol (rtx x);
1.16514 ++
1.16515 ++extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
1.16516 ++ unsigned HOST_WIDE_INT align);
1.16517 ++
1.16518 ++extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
1.16519 ++extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
1.16520 ++
1.16521 ++extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
1.16522 ++ const char *str);
1.16523 ++
1.16524 ++extern bool avr32_cannot_force_const_mem (rtx x);
1.16525 ++
1.16526 ++extern void avr32_init_builtins (void);
1.16527 ++
1.16528 ++extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
1.16529 ++ enum machine_mode mode, int ignore);
1.16530 ++
1.16531 ++extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
1.16532 ++
1.16533 ++extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
1.16534 ++
1.16535 ++extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
1.16536 ++ enum machine_mode mode,
1.16537 ++ tree type, bool named);
1.16538 ++
1.16539 ++extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
1.16540 ++ int write_back, int in_struct_p,
1.16541 ++ int scalar_p);
1.16542 ++extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
1.16543 ++ int in_struct_p, int scalar_p);
1.16544 ++extern int avr32_gen_movmemsi (rtx * operands);
1.16545 ++
1.16546 ++extern int avr32_rnd_operands (rtx add, rtx shift);
1.16547 ++extern int avr32_adjust_insn_length (rtx insn, int length);
1.16548 ++
1.16549 ++extern int symbol_mentioned_p (rtx x);
1.16550 ++extern int label_mentioned_p (rtx x);
1.16551 ++extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
1.16552 ++extern int avr32_address_register_rtx_p (rtx x, int strict_p);
1.16553 ++extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
1.16554 ++ int strict_p);
1.16555 ++
1.16556 ++extern int avr32_const_double_immediate (rtx value);
1.16557 ++extern void avr32_init_expanders (void);
1.16558 ++extern rtx avr32_return_addr (int count, rtx frame);
1.16559 ++extern bool avr32_got_mentioned_p (rtx addr);
1.16560 ++
1.16561 ++extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
1.16562 ++
1.16563 ++extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
1.16564 ++extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
1.16565 ++#ifdef RTX_CODE
1.16566 ++extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
1.16567 ++#endif
1.16568 ++
1.16569 ++extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
1.16570 ++extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
1.16571 ++extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
1.16572 ++extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
1.16573 ++extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
1.16574 ++ rtx op0, rtx op1);
1.16575 ++
1.16576 ++rtx get_next_insn_cond (rtx cur_insn);
1.16577 ++int set_next_insn_cond (rtx cur_insn, rtx cond);
1.16578 ++void avr32_override_options (void);
1.16579 ++void avr32_load_pic_register (void);
1.16580 ++#ifdef GCC_BASIC_BLOCK_H
1.16581 ++rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
1.16582 ++ int *num_true_changes);
1.16583 ++rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test );
1.16584 ++void avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes);
1.16585 ++#endif
1.16586 ++void avr32_optimization_options (int level, int size);
1.16587 ++int avr32_const_ok_for_move (HOST_WIDE_INT c);
1.16588 ++
1.16589 ++void avr32_split_const_expr (enum machine_mode mode,
1.16590 ++ enum machine_mode new_mode,
1.16591 ++ rtx expr,
1.16592 ++ rtx *split_expr);
1.16593 ++void avr32_get_intval (enum machine_mode mode,
1.16594 ++ rtx const_expr,
1.16595 ++ HOST_WIDE_INT *val);
1.16596 ++
1.16597 ++int avr32_cond_imm_clobber_splittable (rtx insn,
1.16598 ++ rtx operands[]);
1.16599 ++
1.16600 ++
1.16601 ++#endif /* AVR32_PROTOS_H */
1.16602 +--- /dev/null
1.16603 ++++ b/gcc/config/avr32/crti.asm
1.16604 +@@ -0,0 +1,64 @@
1.16605 ++/*
1.16606 ++ Init/fini stuff for AVR32.
1.16607 ++ Copyright 2003-2006 Atmel Corporation.
1.16608 ++
1.16609 ++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.16610 ++
1.16611 ++ This file is part of GCC.
1.16612 ++
1.16613 ++ This program is free software; you can redistribute it and/or modify
1.16614 ++ it under the terms of the GNU General Public License as published by
1.16615 ++ the Free Software Foundation; either version 2 of the License, or
1.16616 ++ (at your option) any later version.
1.16617 ++
1.16618 ++ This program is distributed in the hope that it will be useful,
1.16619 ++ but WITHOUT ANY WARRANTY; without even the implied warranty of
1.16620 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.16621 ++ GNU General Public License for more details.
1.16622 ++
1.16623 ++ You should have received a copy of the GNU General Public License
1.16624 ++ along with this program; if not, write to the Free Software
1.16625 ++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
1.16626 ++
1.16627 ++
1.16628 ++/* The code in sections .init and .fini is supposed to be a single
1.16629 ++ regular function. The function in .init is called directly from
1.16630 ++ start in crt1.asm. The function in .fini is atexit()ed in crt1.asm
1.16631 ++ too.
1.16632 ++
1.16633 ++ crti.asm contributes the prologue of a function to these sections,
1.16634 ++ and crtn.asm comes up the epilogue. STARTFILE_SPEC should list
1.16635 ++ crti.o before any other object files that might add code to .init
1.16636 ++ or .fini sections, and ENDFILE_SPEC should list crtn.o after any
1.16637 ++ such object files. */
1.16638 ++
1.16639 ++ .file "crti.asm"
1.16640 ++
1.16641 ++ .section ".init"
1.16642 ++/* Just load the GOT */
1.16643 ++ .align 2
1.16644 ++ .global _init
1.16645 ++_init:
1.16646 ++ stm --sp, r6, lr
1.16647 ++ lddpc r6, 1f
1.16648 ++0:
1.16649 ++ rsub r6, pc
1.16650 ++ rjmp 2f
1.16651 ++ .align 2
1.16652 ++1: .long 0b - _GLOBAL_OFFSET_TABLE_
1.16653 ++2:
1.16654 ++
1.16655 ++ .section ".fini"
1.16656 ++/* Just load the GOT */
1.16657 ++ .align 2
1.16658 ++ .global _fini
1.16659 ++_fini:
1.16660 ++ stm --sp, r6, lr
1.16661 ++ lddpc r6, 1f
1.16662 ++0:
1.16663 ++ rsub r6, pc
1.16664 ++ rjmp 2f
1.16665 ++ .align 2
1.16666 ++1: .long 0b - _GLOBAL_OFFSET_TABLE_
1.16667 ++2:
1.16668 ++
1.16669 +--- /dev/null
1.16670 ++++ b/gcc/config/avr32/crtn.asm
1.16671 +@@ -0,0 +1,44 @@
1.16672 ++/* Copyright (C) 2001 Free Software Foundation, Inc.
1.16673 ++ Written By Nick Clifton
1.16674 ++
1.16675 ++ This file is free software; you can redistribute it and/or modify it
1.16676 ++ under the terms of the GNU General Public License as published by the
1.16677 ++ Free Software Foundation; either version 2, or (at your option) any
1.16678 ++ later version.
1.16679 ++
1.16680 ++ In addition to the permissions in the GNU General Public License, the
1.16681 ++ Free Software Foundation gives you unlimited permission to link the
1.16682 ++ compiled version of this file with other programs, and to distribute
1.16683 ++ those programs without any restriction coming from the use of this
1.16684 ++ file. (The General Public License restrictions do apply in other
1.16685 ++ respects; for example, they cover modification of the file, and
1.16686 ++ distribution when not linked into another program.)
1.16687 ++
1.16688 ++ This file is distributed in the hope that it will be useful, but
1.16689 ++ WITHOUT ANY WARRANTY; without even the implied warranty of
1.16690 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1.16691 ++ General Public License for more details.
1.16692 ++
1.16693 ++ You should have received a copy of the GNU General Public License
1.16694 ++ along with this program; see the file COPYING. If not, write to
1.16695 ++ the Free Software Foundation, 59 Temple Place - Suite 330,
1.16696 ++ Boston, MA 02111-1307, USA.
1.16697 ++
1.16698 ++ As a special exception, if you link this library with files
1.16699 ++ compiled with GCC to produce an executable, this does not cause
1.16700 ++ the resulting executable to be covered by the GNU General Public License.
1.16701 ++ This exception does not however invalidate any other reasons why
1.16702 ++ the executable file might be covered by the GNU General Public License.
1.16703 ++*/
1.16704 ++
1.16705 ++
1.16706 ++
1.16707 ++
1.16708 ++ .file "crtn.asm"
1.16709 ++
1.16710 ++ .section ".init"
1.16711 ++ ldm sp++, r6, pc
1.16712 ++
1.16713 ++ .section ".fini"
1.16714 ++ ldm sp++, r6, pc
1.16715 ++
1.16716 +--- /dev/null
1.16717 ++++ b/gcc/config/avr32/fpcp.md
1.16718 +@@ -0,0 +1,551 @@
1.16719 ++;; AVR32 machine description file for Floating-Point instructions.
1.16720 ++;; Copyright 2003-2006 Atmel Corporation.
1.16721 ++;;
1.16722 ++;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.16723 ++;;
1.16724 ++;; This file is part of GCC.
1.16725 ++;;
1.16726 ++;; This program is free software; you can redistribute it and/or modify
1.16727 ++;; it under the terms of the GNU General Public License as published by
1.16728 ++;; the Free Software Foundation; either version 2 of the License, or
1.16729 ++;; (at your option) any later version.
1.16730 ++;;
1.16731 ++;; This program is distributed in the hope that it will be useful,
1.16732 ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
1.16733 ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.16734 ++;; GNU General Public License for more details.
1.16735 ++;;
1.16736 ++;; You should have received a copy of the GNU General Public License
1.16737 ++;; along with this program; if not, write to the Free Software
1.16738 ++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
1.16739 ++
1.16740 ++;; -*- Mode: Scheme -*-
1.16741 ++
1.16742 ++;;******************************************************************************
1.16743 ++;; Automaton pipeline description for floating-point coprocessor insns
1.16744 ++;;******************************************************************************
1.16745 ++(define_cpu_unit "fid,fm1,fm2,fm3,fm4,fwb,fcmp,fcast" "avr32_ap")
1.16746 ++
1.16747 ++(define_insn_reservation "fmv_op" 1
1.16748 ++ (and (eq_attr "pipeline" "ap")
1.16749 ++ (eq_attr "type" "fmv"))
1.16750 ++ "is,da,d,fid,fwb")
1.16751 ++
1.16752 ++(define_insn_reservation "fmul_op" 5
1.16753 ++ (and (eq_attr "pipeline" "ap")
1.16754 ++ (eq_attr "type" "fmul"))
1.16755 ++ "is,da,d,fid,fm1,fm2,fm3,fm4,fwb")
1.16756 ++
1.16757 ++(define_insn_reservation "fcmps_op" 1
1.16758 ++ (and (eq_attr "pipeline" "ap")
1.16759 ++ (eq_attr "type" "fcmps"))
1.16760 ++ "is,da,d,fid,fcmp")
1.16761 ++
1.16762 ++(define_insn_reservation "fcmpd_op" 2
1.16763 ++ (and (eq_attr "pipeline" "ap")
1.16764 ++ (eq_attr "type" "fcmpd"))
1.16765 ++ "is,da,d,fid*2,fcmp")
1.16766 ++
1.16767 ++(define_insn_reservation "fcast_op" 3
1.16768 ++ (and (eq_attr "pipeline" "ap")
1.16769 ++ (eq_attr "type" "fcast"))
1.16770 ++ "is,da,d,fid,fcmp,fcast,fwb")
1.16771 ++
1.16772 ++(define_insn_reservation "fmvcpu_op" 2
1.16773 ++ (and (eq_attr "pipeline" "ap")
1.16774 ++ (eq_attr "type" "fmvcpu"))
1.16775 ++ "is,da,d")
1.16776 ++
1.16777 ++(define_insn_reservation "fldd_op" 1
1.16778 ++ (and (eq_attr "pipeline" "ap")
1.16779 ++ (eq_attr "type" "fldd"))
1.16780 ++ "is,da,d,fwb")
1.16781 ++
1.16782 ++(define_insn_reservation "flds_op" 1
1.16783 ++ (and (eq_attr "pipeline" "ap")
1.16784 ++ (eq_attr "type" "flds"))
1.16785 ++ "is,da,d,fwb")
1.16786 ++
1.16787 ++(define_insn_reservation "fsts_op" 0
1.16788 ++ (and (eq_attr "pipeline" "ap")
1.16789 ++ (eq_attr "type" "fsts"))
1.16790 ++ "is,da*2,d")
1.16791 ++
1.16792 ++(define_insn_reservation "fstd_op" 0
1.16793 ++ (and (eq_attr "pipeline" "ap")
1.16794 ++ (eq_attr "type" "fstd"))
1.16795 ++ "is,da*2,d")
1.16796 ++
1.16797 ++
1.16798 ++(define_insn "*movsf_fpcp"
1.16799 ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,r,m")
1.16800 ++ (match_operand:SF 1 "general_operand" " f,r,f,m,f,r,G,m,r"))]
1.16801 ++ "TARGET_HARD_FLOAT"
1.16802 ++ "@
1.16803 ++ fmov.s\t%0, %1
1.16804 ++ fmov.s\t%0, %1
1.16805 ++ fmov.s\t%0, %1
1.16806 ++ fld.s\t%0, %1
1.16807 ++ fst.s\t%0, %1
1.16808 ++ mov\t%0, %1
1.16809 ++ mov\t%0, %1
1.16810 ++ ld.w\t%0, %1
1.16811 ++ st.w\t%0, %1"
1.16812 ++ [(set_attr "length" "4,4,4,4,4,2,4,4,4")
1.16813 ++ (set_attr "type" "fmv,flds,fmvcpu,flds,fsts,alu,alu,load,store")])
1.16814 ++
1.16815 ++(define_insn_and_split "*movdf_fpcp"
1.16816 ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,m")
1.16817 ++ (match_operand:DF 1 "general_operand" " f,r,f,m,f,r,m,r"))]
1.16818 ++ "TARGET_HARD_FLOAT"
1.16819 ++ "@
1.16820 ++ fmov.d\t%0, %1
1.16821 ++ fmov.d\t%0, %1
1.16822 ++ fmov.d\t%0, %1
1.16823 ++ fld.d\t%0, %1
1.16824 ++ fst.d\t%0, %1
1.16825 ++ mov\t%0, %1\;mov\t%m0, %m1
1.16826 ++ ld.d\t%0, %1
1.16827 ++ st.d\t%0, %1"
1.16828 ++
1.16829 ++ "TARGET_HARD_FLOAT
1.16830 ++ && reload_completed
1.16831 ++ && (REG_P(operands[0]) && (REGNO_REG_CLASS(REGNO(operands[0])) == GENERAL_REGS))
1.16832 ++ && (REG_P(operands[1]) && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS))"
1.16833 ++ [(set (match_dup 0) (match_dup 1))
1.16834 ++ (set (match_dup 2) (match_dup 3))]
1.16835 ++ "
1.16836 ++ {
1.16837 ++ operands[2] = gen_highpart (SImode, operands[0]);
1.16838 ++ operands[0] = gen_lowpart (SImode, operands[0]);
1.16839 ++ operands[3] = gen_highpart(SImode, operands[1]);
1.16840 ++ operands[1] = gen_lowpart(SImode, operands[1]);
1.16841 ++ }
1.16842 ++ "
1.16843 ++
1.16844 ++ [(set_attr "length" "4,4,4,4,4,4,4,4")
1.16845 ++ (set_attr "type" "fmv,fldd,fmvcpu,fldd,fstd,alu2,load2,store2")])
1.16846 ++
1.16847 ++
1.16848 ++(define_insn "mulsf3"
1.16849 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16850 ++ (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16851 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
1.16852 ++ "TARGET_HARD_FLOAT"
1.16853 ++ "fmul.s\t%0, %1, %2"
1.16854 ++ [(set_attr "length" "4")
1.16855 ++ (set_attr "type" "fmul")])
1.16856 ++
1.16857 ++(define_insn "nmulsf3"
1.16858 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16859 ++ (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16860 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f"))))]
1.16861 ++ "TARGET_HARD_FLOAT"
1.16862 ++ "fnmul.s\t%0, %1, %2"
1.16863 ++ [(set_attr "length" "4")
1.16864 ++ (set_attr "type" "fmul")])
1.16865 ++
1.16866 ++(define_peephole2
1.16867 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
1.16868 ++ (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
1.16869 ++ (match_operand:SF 2 "avr32_fp_register_operand" "")))
1.16870 ++ (set (match_operand:SF 3 "avr32_fp_register_operand" "")
1.16871 ++ (neg:SF (match_dup 0)))]
1.16872 ++ "TARGET_HARD_FLOAT &&
1.16873 ++ (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
1.16874 ++ [(set (match_dup 3)
1.16875 ++ (neg:SF (mult:SF (match_dup 1)
1.16876 ++ (match_dup 2))))]
1.16877 ++)
1.16878 ++
1.16879 ++
1.16880 ++(define_insn "macsf3"
1.16881 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16882 ++ (plus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16883 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f"))
1.16884 ++ (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
1.16885 ++ "TARGET_HARD_FLOAT"
1.16886 ++ "fmac.s\t%0, %1, %2"
1.16887 ++ [(set_attr "length" "4")
1.16888 ++ (set_attr "type" "fmul")])
1.16889 ++
1.16890 ++(define_insn "nmacsf3"
1.16891 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16892 ++ (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16893 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f")))
1.16894 ++ (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
1.16895 ++ "TARGET_HARD_FLOAT"
1.16896 ++ "fnmac.s\t%0, %1, %2"
1.16897 ++ [(set_attr "length" "4")
1.16898 ++ (set_attr "type" "fmul")])
1.16899 ++
1.16900 ++(define_peephole2
1.16901 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
1.16902 ++ (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
1.16903 ++ (match_operand:SF 2 "avr32_fp_register_operand" "")))
1.16904 ++ (set (match_operand:SF 3 "avr32_fp_register_operand" "")
1.16905 ++ (minus:SF
1.16906 ++ (match_dup 3)
1.16907 ++ (match_dup 0)))]
1.16908 ++ "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
1.16909 ++ [(set (match_dup 3)
1.16910 ++ (plus:SF (neg:SF (mult:SF (match_dup 1)
1.16911 ++ (match_dup 2)))
1.16912 ++ (match_dup 3)))]
1.16913 ++)
1.16914 ++
1.16915 ++
1.16916 ++(define_insn "msubacsf3"
1.16917 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16918 ++ (minus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16919 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f"))
1.16920 ++ (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
1.16921 ++ "TARGET_HARD_FLOAT"
1.16922 ++ "fmsc.s\t%0, %1, %2"
1.16923 ++ [(set_attr "length" "4")
1.16924 ++ (set_attr "type" "fmul")])
1.16925 ++
1.16926 ++(define_peephole2
1.16927 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
1.16928 ++ (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
1.16929 ++ (match_operand:SF 2 "avr32_fp_register_operand" "")))
1.16930 ++ (set (match_operand:SF 3 "avr32_fp_register_operand" "")
1.16931 ++ (minus:SF
1.16932 ++ (match_dup 0)
1.16933 ++ (match_dup 3)))]
1.16934 ++ "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
1.16935 ++ [(set (match_dup 3)
1.16936 ++ (minus:SF (mult:SF (match_dup 1)
1.16937 ++ (match_dup 2))
1.16938 ++ (match_dup 3)))]
1.16939 ++)
1.16940 ++
1.16941 ++(define_insn "nmsubacsf3"
1.16942 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16943 ++ (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16944 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f")))
1.16945 ++ (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
1.16946 ++ "TARGET_HARD_FLOAT"
1.16947 ++ "fnmsc.s\t%0, %1, %2"
1.16948 ++ [(set_attr "length" "4")
1.16949 ++ (set_attr "type" "fmul")])
1.16950 ++
1.16951 ++
1.16952 ++
1.16953 ++(define_insn "addsf3"
1.16954 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16955 ++ (plus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16956 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
1.16957 ++ "TARGET_HARD_FLOAT"
1.16958 ++ "fadd.s\t%0, %1, %2"
1.16959 ++ [(set_attr "length" "4")
1.16960 ++ (set_attr "type" "fmul")])
1.16961 ++
1.16962 ++(define_insn "subsf3"
1.16963 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16964 ++ (minus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
1.16965 ++ (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
1.16966 ++ "TARGET_HARD_FLOAT"
1.16967 ++ "fsub.s\t%0, %1, %2"
1.16968 ++ [(set_attr "length" "4")
1.16969 ++ (set_attr "type" "fmul")])
1.16970 ++
1.16971 ++
1.16972 ++(define_insn "negsf2"
1.16973 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16974 ++ (neg:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
1.16975 ++ "TARGET_HARD_FLOAT"
1.16976 ++ "fneg.s\t%0, %1"
1.16977 ++ [(set_attr "length" "4")
1.16978 ++ (set_attr "type" "fmv")])
1.16979 ++
1.16980 ++(define_insn "abssf2"
1.16981 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16982 ++ (abs:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
1.16983 ++ "TARGET_HARD_FLOAT"
1.16984 ++ "fabs.s\t%0, %1"
1.16985 ++ [(set_attr "length" "4")
1.16986 ++ (set_attr "type" "fmv")])
1.16987 ++
1.16988 ++(define_insn "truncdfsf2"
1.16989 ++ [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
1.16990 ++ (float_truncate:SF
1.16991 ++ (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
1.16992 ++ "TARGET_HARD_FLOAT"
1.16993 ++ "fcastd.s\t%0, %1"
1.16994 ++ [(set_attr "length" "4")
1.16995 ++ (set_attr "type" "fcast")])
1.16996 ++
1.16997 ++(define_insn "extendsfdf2"
1.16998 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.16999 ++ (float_extend:DF
1.17000 ++ (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
1.17001 ++ "TARGET_HARD_FLOAT"
1.17002 ++ "fcasts.d\t%0, %1"
1.17003 ++ [(set_attr "length" "4")
1.17004 ++ (set_attr "type" "fcast")])
1.17005 ++
1.17006 ++(define_insn "muldf3"
1.17007 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17008 ++ (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17009 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
1.17010 ++ "TARGET_HARD_FLOAT"
1.17011 ++ "fmul.d\t%0, %1, %2"
1.17012 ++ [(set_attr "length" "4")
1.17013 ++ (set_attr "type" "fmul")])
1.17014 ++
1.17015 ++(define_insn "nmuldf3"
1.17016 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17017 ++ (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17018 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f"))))]
1.17019 ++ "TARGET_HARD_FLOAT"
1.17020 ++ "fnmul.d\t%0, %1, %2"
1.17021 ++ [(set_attr "length" "4")
1.17022 ++ (set_attr "type" "fmul")])
1.17023 ++
1.17024 ++(define_peephole2
1.17025 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
1.17026 ++ (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
1.17027 ++ (match_operand:DF 2 "avr32_fp_register_operand" "")))
1.17028 ++ (set (match_operand:DF 3 "avr32_fp_register_operand" "")
1.17029 ++ (neg:DF (match_dup 0)))]
1.17030 ++ "TARGET_HARD_FLOAT &&
1.17031 ++ (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
1.17032 ++ [(set (match_dup 3)
1.17033 ++ (neg:DF (mult:DF (match_dup 1)
1.17034 ++ (match_dup 2))))]
1.17035 ++)
1.17036 ++
1.17037 ++(define_insn "macdf3"
1.17038 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17039 ++ (plus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17040 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f"))
1.17041 ++ (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
1.17042 ++ "TARGET_HARD_FLOAT"
1.17043 ++ "fmac.d\t%0, %1, %2"
1.17044 ++ [(set_attr "length" "4")
1.17045 ++ (set_attr "type" "fmul")])
1.17046 ++
1.17047 ++(define_insn "msubacdf3"
1.17048 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17049 ++ (minus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17050 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f"))
1.17051 ++ (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
1.17052 ++ "TARGET_HARD_FLOAT"
1.17053 ++ "fmsc.d\t%0, %1, %2"
1.17054 ++ [(set_attr "length" "4")
1.17055 ++ (set_attr "type" "fmul")])
1.17056 ++
1.17057 ++(define_peephole2
1.17058 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
1.17059 ++ (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
1.17060 ++ (match_operand:DF 2 "avr32_fp_register_operand" "")))
1.17061 ++ (set (match_operand:DF 3 "avr32_fp_register_operand" "")
1.17062 ++ (minus:DF
1.17063 ++ (match_dup 0)
1.17064 ++ (match_dup 3)))]
1.17065 ++ "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
1.17066 ++ [(set (match_dup 3)
1.17067 ++ (minus:DF (mult:DF (match_dup 1)
1.17068 ++ (match_dup 2))
1.17069 ++ (match_dup 3)))]
1.17070 ++ )
1.17071 ++
1.17072 ++(define_insn "nmsubacdf3"
1.17073 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17074 ++ (minus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17075 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f")))
1.17076 ++ (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
1.17077 ++ "TARGET_HARD_FLOAT"
1.17078 ++ "fnmsc.d\t%0, %1, %2"
1.17079 ++ [(set_attr "length" "4")
1.17080 ++ (set_attr "type" "fmul")])
1.17081 ++
1.17082 ++(define_insn "nmacdf3"
1.17083 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17084 ++ (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17085 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f")))
1.17086 ++ (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
1.17087 ++ "TARGET_HARD_FLOAT"
1.17088 ++ "fnmac.d\t%0, %1, %2"
1.17089 ++ [(set_attr "length" "4")
1.17090 ++ (set_attr "type" "fmul")])
1.17091 ++
1.17092 ++(define_peephole2
1.17093 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
1.17094 ++ (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
1.17095 ++ (match_operand:DF 2 "avr32_fp_register_operand" "")))
1.17096 ++ (set (match_operand:DF 3 "avr32_fp_register_operand" "")
1.17097 ++ (minus:DF
1.17098 ++ (match_dup 3)
1.17099 ++ (match_dup 0)))]
1.17100 ++ "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
1.17101 ++ [(set (match_dup 3)
1.17102 ++ (plus:DF (neg:DF (mult:DF (match_dup 1)
1.17103 ++ (match_dup 2)))
1.17104 ++ (match_dup 3)))]
1.17105 ++)
1.17106 ++
1.17107 ++(define_insn "adddf3"
1.17108 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17109 ++ (plus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17110 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
1.17111 ++ "TARGET_HARD_FLOAT"
1.17112 ++ "fadd.d\t%0, %1, %2"
1.17113 ++ [(set_attr "length" "4")
1.17114 ++ (set_attr "type" "fmul")])
1.17115 ++
1.17116 ++(define_insn "subdf3"
1.17117 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17118 ++ (minus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
1.17119 ++ (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
1.17120 ++ "TARGET_HARD_FLOAT"
1.17121 ++ "fsub.d\t%0, %1, %2"
1.17122 ++ [(set_attr "length" "4")
1.17123 ++ (set_attr "type" "fmul")])
1.17124 ++
1.17125 ++(define_insn "negdf2"
1.17126 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17127 ++ (neg:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
1.17128 ++ "TARGET_HARD_FLOAT"
1.17129 ++ "fneg.d\t%0, %1"
1.17130 ++ [(set_attr "length" "4")
1.17131 ++ (set_attr "type" "fmv")])
1.17132 ++
1.17133 ++(define_insn "absdf2"
1.17134 ++ [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
1.17135 ++ (abs:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
1.17136 ++ "TARGET_HARD_FLOAT"
1.17137 ++ "fabs.d\t%0, %1"
1.17138 ++ [(set_attr "length" "4")
1.17139 ++ (set_attr "type" "fmv")])
1.17140 ++
1.17141 ++
1.17142 ++(define_expand "cmpdf"
1.17143 ++ [(set (cc0)
1.17144 ++ (compare:DF
1.17145 ++ (match_operand:DF 0 "general_operand" "")
1.17146 ++ (match_operand:DF 1 "general_operand" "")))]
1.17147 ++ "TARGET_HARD_FLOAT"
1.17148 ++ "{
1.17149 ++ rtx tmpreg;
1.17150 ++ if ( !REG_P(operands[0]) )
1.17151 ++ operands[0] = force_reg(DFmode, operands[0]);
1.17152 ++
1.17153 ++ if ( !REG_P(operands[1]) )
1.17154 ++ operands[1] = force_reg(DFmode, operands[1]);
1.17155 ++
1.17156 ++ avr32_compare_op0 = operands[0];
1.17157 ++ avr32_compare_op1 = operands[1];
1.17158 ++
1.17159 ++ emit_insn(gen_cmpdf_internal(operands[0], operands[1]));
1.17160 ++
1.17161 ++ tmpreg = gen_reg_rtx(SImode);
1.17162 ++ emit_insn(gen_fpcc_to_reg(tmpreg));
1.17163 ++ emit_insn(gen_reg_to_cc(tmpreg));
1.17164 ++
1.17165 ++ DONE;
1.17166 ++ }"
1.17167 ++)
1.17168 ++
1.17169 ++(define_insn "cmpdf_internal"
1.17170 ++ [(set (reg:CC FPCC_REGNUM)
1.17171 ++ (compare:CC
1.17172 ++ (match_operand:DF 0 "avr32_fp_register_operand" "f")
1.17173 ++ (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
1.17174 ++ "TARGET_HARD_FLOAT"
1.17175 ++ {
1.17176 ++ if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
1.17177 ++ return "fcmp.d\t%0, %1";
1.17178 ++ return "";
1.17179 ++ }
1.17180 ++ [(set_attr "length" "4")
1.17181 ++ (set_attr "type" "fcmpd")
1.17182 ++ (set_attr "cc" "fpcompare")])
1.17183 ++
1.17184 ++(define_expand "cmpsf"
1.17185 ++ [(set (cc0)
1.17186 ++ (compare:SF
1.17187 ++ (match_operand:SF 0 "general_operand" "")
1.17188 ++ (match_operand:SF 1 "general_operand" "")))]
1.17189 ++ "TARGET_HARD_FLOAT"
1.17190 ++ "{
1.17191 ++ rtx tmpreg;
1.17192 ++ if ( !REG_P(operands[0]) )
1.17193 ++ operands[0] = force_reg(SFmode, operands[0]);
1.17194 ++
1.17195 ++ if ( !REG_P(operands[1]) )
1.17196 ++ operands[1] = force_reg(SFmode, operands[1]);
1.17197 ++
1.17198 ++ avr32_compare_op0 = operands[0];
1.17199 ++ avr32_compare_op1 = operands[1];
1.17200 ++
1.17201 ++ emit_insn(gen_cmpsf_internal(operands[0], operands[1]));
1.17202 ++
1.17203 ++ tmpreg = gen_reg_rtx(SImode);
1.17204 ++ emit_insn(gen_fpcc_to_reg(tmpreg));
1.17205 ++ emit_insn(gen_reg_to_cc(tmpreg));
1.17206 ++
1.17207 ++ DONE;
1.17208 ++ }"
1.17209 ++)
1.17210 ++
1.17211 ++(define_insn "cmpsf_internal"
1.17212 ++ [(set (reg:CC FPCC_REGNUM)
1.17213 ++ (compare:CC
1.17214 ++ (match_operand:SF 0 "avr32_fp_register_operand" "f")
1.17215 ++ (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
1.17216 ++ "TARGET_HARD_FLOAT"
1.17217 ++ {
1.17218 ++ if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
1.17219 ++ return "fcmp.s\t%0, %1";
1.17220 ++ return "";
1.17221 ++ }
1.17222 ++ [(set_attr "length" "4")
1.17223 ++ (set_attr "type" "fcmps")
1.17224 ++ (set_attr "cc" "fpcompare")])
1.17225 ++
1.17226 ++(define_insn "fpcc_to_reg"
1.17227 ++ [(set (match_operand:SI 0 "register_operand" "=r")
1.17228 ++ (unspec:SI [(reg:CC FPCC_REGNUM)]
1.17229 ++ UNSPEC_FPCC_TO_REG))]
1.17230 ++ "TARGET_HARD_FLOAT"
1.17231 ++ "fmov.s\t%0, fsr"
1.17232 ++ [(set_attr "length" "4")
1.17233 ++ (set_attr "type" "fmvcpu")])
1.17234 ++
1.17235 ++(define_insn "reg_to_cc"
1.17236 ++ [(set (cc0)
1.17237 ++ (unspec:SI [(match_operand:SI 0 "register_operand" "r")]
1.17238 ++ UNSPEC_REG_TO_CC))]
1.17239 ++ "TARGET_HARD_FLOAT"
1.17240 ++ "musfr\t%0"
1.17241 ++ [(set_attr "length" "2")
1.17242 ++ (set_attr "type" "alu")
1.17243 ++ (set_attr "cc" "from_fpcc")])
1.17244 ++
1.17245 ++(define_insn "stm_fp"
1.17246 ++ [(unspec [(match_operand 0 "register_operand" "r")
1.17247 ++ (match_operand 1 "const_int_operand" "")
1.17248 ++ (match_operand 2 "const_int_operand" "")]
1.17249 ++ UNSPEC_STMFP)]
1.17250 ++ "TARGET_HARD_FLOAT"
1.17251 ++ {
1.17252 ++ int cop_reglist = INTVAL(operands[1]);
1.17253 ++
1.17254 ++ if (INTVAL(operands[2]) != 0)
1.17255 ++ return "stcm.w\tcp0, --%0, %C1";
1.17256 ++ else
1.17257 ++ return "stcm.w\tcp0, %0, %C1";
1.17258 ++
1.17259 ++ if ( cop_reglist & ~0xff ){
1.17260 ++ operands[1] = GEN_INT(cop_reglist & ~0xff);
1.17261 ++ if (INTVAL(operands[2]) != 0)
1.17262 ++ return "stcm.d\tcp0, --%0, %D1";
1.17263 ++ else
1.17264 ++ return "stcm.d\tcp0, %0, %D1";
1.17265 ++ }
1.17266 ++ }
1.17267 ++ [(set_attr "type" "fstm")
1.17268 ++ (set_attr "length" "4")
1.17269 ++ (set_attr "cc" "none")])
1.17270 +--- /dev/null
1.17271 ++++ b/gcc/config/avr32/lib1funcs.S
1.17272 +@@ -0,0 +1,2874 @@
1.17273 ++/* Macro for moving immediate value to register. */
1.17274 ++.macro mov_imm reg, imm
1.17275 ++.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm))
1.17276 ++ mov \reg, \imm
1.17277 ++#if __AVR32_UC__ >= 2
1.17278 ++.elseif ((\imm & 0xffff) == 0)
1.17279 ++ movh \reg, hi(\imm)
1.17280 ++
1.17281 ++#endif
1.17282 ++.else
1.17283 ++ mov \reg, lo(\imm)
1.17284 ++ orh \reg, hi(\imm)
1.17285 ++.endif
1.17286 ++.endm
1.17287 ++
1.17288 ++
1.17289 ++
1.17290 ++/* Adjust the unpacked double number if it is a subnormal number.
1.17291 ++ The exponent and mantissa pair are stored
1.17292 ++ in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
1.17293 ++ the MSB is passed in [sign]. Needs two scratch
1.17294 ++ registers [scratch1] and [scratch2]. An adjusted and packed double float
1.17295 ++ is present in [mant_hi,mant_lo] after macro has executed */
1.17296 ++.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2
1.17297 ++ /* We have an exponent which is <=0 indicating a subnormal number
1.17298 ++ As it should be stored as if the exponent was 1 (although the
1.17299 ++ exponent field is all zeros to indicate a subnormal number)
1.17300 ++ we have to shift down the mantissa to its correct position. */
1.17301 ++ neg \exp
1.17302 ++ sub \exp,-1 /* amount to shift down */
1.17303 ++ cp.w \exp,54
1.17304 ++ brlo 50f /* if more than 53 shift steps, the
1.17305 ++ entire mantissa will disappear
1.17306 ++ without any rounding to occur */
1.17307 ++ mov \mant_hi, 0
1.17308 ++ mov \mant_lo, 0
1.17309 ++ rjmp 52f
1.17310 ++50:
1.17311 ++ sub \exp,-10 /* do the shift to position the
1.17312 ++ mantissa at the same time
1.17313 ++ note! this does not include the
1.17314 ++ final 1 step shift to add the sign */
1.17315 ++
1.17316 ++ /* when shifting, save all shifted out bits in [scratch2]. we may need to
1.17317 ++ look at them to make correct rounding. */
1.17318 ++
1.17319 ++ rsub \scratch1,\exp,32 /* get inverted shift count */
1.17320 ++ cp.w \exp,32 /* handle shifts >= 32 separately */
1.17321 ++ brhs 51f
1.17322 ++
1.17323 ++ /* small (<32) shift amount, both words are part of the shift */
1.17324 ++ lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/
1.17325 ++ lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
1.17326 ++ lsr \mant_lo,\mant_lo,\exp /* shift down lsw */
1.17327 ++ lsr \mant_hi,\mant_hi,\exp /* shift down msw */
1.17328 ++ or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */
1.17329 ++ rjmp 50f
1.17330 ++
1.17331 ++ /* large (>=32) shift amount, only lsw will have bits left after shift.
1.17332 ++ note that shift operations will use ((shift count) mod 32) so
1.17333 ++ we do not need to subtract 32 from shift count. */
1.17334 ++51:
1.17335 ++ lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */
1.17336 ++ or \scratch2,\mant_lo /* also save all bits from lsw */
1.17337 ++ mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */
1.17338 ++ mov \mant_hi,0 /* clear msw */
1.17339 ++ lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */
1.17340 ++
1.17341 ++50:
1.17342 ++ /* result is almost ready to return, except that least significant bit
1.17343 ++ and the part we already shifted out may cause the result to be
1.17344 ++ rounded */
1.17345 ++ bld \mant_lo,0 /* get bit to be shifted out */
1.17346 ++ brcc 51f /* if bit was 0, no rounding */
1.17347 ++
1.17348 ++ /* msb of part to remove is 1, so rounding depends on rest of bits */
1.17349 ++ tst \scratch2,\scratch2 /* get shifted out tail */
1.17350 ++ brne 50f /* if rest > 0, do round */
1.17351 ++ bld \mant_lo,1 /* we have to look at lsb in result */
1.17352 ++ brcc 51f /* if lsb is 0, don't round */
1.17353 ++
1.17354 ++50:
1.17355 ++ /* subnormal result requires rounding
1.17356 ++ rounding may cause subnormal to become smallest normal number
1.17357 ++ luckily, smallest normal number has exactly the representation
1.17358 ++ we got by rippling a one bit up from mantissa into exponent field. */
1.17359 ++ sub \mant_lo,-1
1.17360 ++ subcc \mant_hi,-1
1.17361 ++
1.17362 ++51:
1.17363 ++ /* shift and return packed double with correct sign */
1.17364 ++ rol \sign
1.17365 ++ ror \mant_hi
1.17366 ++ ror \mant_lo
1.17367 ++52:
1.17368 ++.endm
1.17369 ++
1.17370 ++
1.17371 ++/* Adjust subnormal single float number with exponent [exp]
1.17372 ++ and mantissa [mant] and round. */
1.17373 ++.macro adjust_subnormal_sf sf, exp, mant, sign, scratch
1.17374 ++ /* subnormal number */
1.17375 ++ rsub \exp,\exp, 1 /* shift amount */
1.17376 ++ cp.w \exp, 25
1.17377 ++ movhs \mant, 0
1.17378 ++ brhs 90f /* Return zero */
1.17379 ++ rsub \scratch, \exp, 32
1.17380 ++ lsl \scratch, \mant,\scratch/* Check if there are any bits set
1.17381 ++ in the bits discarded in the mantissa */
1.17382 ++ srne \scratch /* If so set the lsb of the shifted mantissa */
1.17383 ++ lsr \mant,\mant,\exp /* Shift the mantissa */
1.17384 ++ or \mant, \scratch /* Round lsb if any bits were shifted out */
1.17385 ++ /* Rounding : For explaination, see round_sf. */
1.17386 ++ mov \scratch, 0x7f /* Set rounding constant */
1.17387 ++ bld \mant, 8
1.17388 ++ subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */
1.17389 ++ add \mant, \scratch /* Add rounding constant to mantissa */
1.17390 ++ /* We can't overflow because mantissa is at least shifted one position
1.17391 ++ to the right so the implicit bit is zero. We can however get the implicit
1.17392 ++ bit set after rounding which means that we have the lowest normal number
1.17393 ++ but this is ok since this bit has the same position as the LSB of the
1.17394 ++ exponent */
1.17395 ++ lsr \sf, \mant, 7
1.17396 ++ /* Rotate in sign */
1.17397 ++ lsl \sign, 1
1.17398 ++ ror \sf
1.17399 ++90:
1.17400 ++.endm
1.17401 ++
1.17402 ++
1.17403 ++/* Round the unpacked df number with exponent [exp] and
1.17404 ++ mantissa [mant_hi, mant_lo]. Uses scratch register
1.17405 ++ [scratch] */
1.17406 ++.macro round_df exp, mant_lo, mant_hi, scratch
1.17407 ++ mov \scratch, 0x3ff /* Rounding constant */
1.17408 ++ bld \mant_lo,11 /* Check if lsb in the final result is
1.17409 ++ set */
1.17410 ++ subeq \scratch, -1 /* Adjust rounding constant to 0x400
1.17411 ++ if rounding 0.5 upwards */
1.17412 ++ add \mant_lo, \scratch /* Round */
1.17413 ++ acr \mant_hi /* If overflowing we know that
1.17414 ++ we have all zeros in the bits not
1.17415 ++ scaled out so we can leave them
1.17416 ++ but we must increase the exponent with
1.17417 ++ two since we had an implicit bit
1.17418 ++ which is lost + the extra overflow bit */
1.17419 ++ subcs \exp, -2 /* Update exponent */
1.17420 ++.endm
1.17421 ++
1.17422 ++/* Round single float number stored in [mant] and [exp] */
1.17423 ++.macro round_sf exp, mant, scratch
1.17424 ++ /* Round:
1.17425 ++ For 0.5 we round to nearest even integer
1.17426 ++ for all other cases we round to nearest integer.
1.17427 ++ This means that if the digit left of the "point" (.)
1.17428 ++ is 1 we can add 0x80 to the mantissa since the
1.17429 ++ corner case 0x180 will round up to 0x200. If the
1.17430 ++ digit left of the "point" is 0 we will have to
1.17431 ++ add 0x7f since this will give 0xff and hence a
1.17432 ++ truncation/rounding downwards for the corner
1.17433 ++ case when the 9 lowest bits are 0x080 */
1.17434 ++ mov \scratch, 0x7f /* Set rounding constant */
1.17435 ++ /* Check if the mantissa is even or odd */
1.17436 ++ bld \mant, 8
1.17437 ++ subeq \scratch, -1 /* Rounding constant should be 0x80 */
1.17438 ++ add \mant, \scratch
1.17439 ++ subcs \exp, -2 /* Adjust exponent if we overflowed */
1.17440 ++.endm
1.17441 ++
1.17442 ++
1.17443 ++
1.17444 ++/* Pack a single float number stored in [mant] and [exp]
1.17445 ++ into a single float number in [sf] */
1.17446 ++.macro pack_sf sf, exp, mant
1.17447 ++ bld \mant,31 /* implicit bit to z */
1.17448 ++ subne \exp,1 /* if subnormal (implicit bit 0)
1.17449 ++ adjust exponent to storage format */
1.17450 ++
1.17451 ++ lsr \sf, \mant, 7
1.17452 ++ bfins \sf, \exp, 24, 8
1.17453 ++.endm
1.17454 ++
1.17455 ++/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
1.17456 ++ into [df_hi, df_lo]. [df_hi] is shifted
1.17457 ++ one bit up so the sign bit can be shifted into it */
1.17458 ++
1.17459 ++.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi
1.17460 ++ bld \mant_hi,31 /* implicit bit to z */
1.17461 ++ subne \exp,1 /* if subnormal (implicit bit 0)
1.17462 ++ adjust exponent to storage format */
1.17463 ++
1.17464 ++ lsr \mant_lo,11 /* shift back lsw */
1.17465 ++ or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */
1.17466 ++ lsl \mant_hi,1 /* get rid of implicit bit */
1.17467 ++ lsr \mant_hi,11 /* shift back msw except for one step*/
1.17468 ++ or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */
1.17469 ++.endm
1.17470 ++
1.17471 ++/* Normalize single float number stored in [mant] and [exp]
1.17472 ++ using scratch register [scratch] */
1.17473 ++.macro normalize_sf exp, mant, scratch
1.17474 ++ /* Adjust exponent and mantissa */
1.17475 ++ clz \scratch, \mant
1.17476 ++ sub \exp, \scratch
1.17477 ++ lsl \mant, \mant, \scratch
1.17478 ++.endm
1.17479 ++
1.17480 ++/* Normalize the exponent and mantissa pair stored
1.17481 ++ in [mant_hi,mant_lo] and [exp]. Needs two scratch
1.17482 ++ registers [scratch1] and [scratch2]. */
1.17483 ++.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2
1.17484 ++ clz \scratch1,\mant_hi /* Check if we have zeros in high bits */
1.17485 ++ breq 80f /* No need for scaling if no zeros in high bits */
1.17486 ++ brcs 81f /* Check for all zeros */
1.17487 ++
1.17488 ++ /* shift amount is smaller than 32, and involves both msw and lsw*/
1.17489 ++ rsub \scratch2,\scratch1,32 /* shift mantissa */
1.17490 ++ lsl \mant_hi,\mant_hi,\scratch1
1.17491 ++ lsr \scratch2,\mant_lo,\scratch2
1.17492 ++ or \mant_hi,\scratch2
1.17493 ++ lsl \mant_lo,\mant_lo,\scratch1
1.17494 ++ sub \exp,\scratch1 /* adjust exponent */
1.17495 ++ rjmp 80f /* Finished */
1.17496 ++81:
1.17497 ++ /* shift amount is greater than 32 */
1.17498 ++ clz \scratch1,\mant_lo /* shift mantissa */
1.17499 ++ movcs \scratch1, 0
1.17500 ++ subcc \scratch1,-32
1.17501 ++ lsl \mant_hi,\mant_lo,\scratch1
1.17502 ++ mov \mant_lo,0
1.17503 ++ sub \exp,\scratch1 /* adjust exponent */
1.17504 ++80:
1.17505 ++.endm
1.17506 ++
1.17507 ++
1.17508 ++/* Fast but approximate multiply of two 64-bit numbers to give a 64 bit result.
1.17509 ++ The multiplication of [al]x[bl] is discarded.
1.17510 ++ Operands in [ah], [al], [bh], [bl].
1.17511 ++ Scratch registers in [sh], [sl].
1.17512 ++ Returns results in registers [rh], [rl].*/
1.17513 ++.macro mul_approx_df ah, al, bh, bl, rh, rl, sh, sl
1.17514 ++ mulu.d \sl, \ah, \bl
1.17515 ++ macu.d \sl, \al, \bh
1.17516 ++ mulu.d \rl, \ah, \bh
1.17517 ++ add \rl, \sh
1.17518 ++ acr \rh
1.17519 ++.endm
1.17520 ++
1.17521 ++
1.17522 ++
1.17523 ++#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast)
1.17524 ++ .align 2
1.17525 ++#if defined(L_avr32_f64_mul)
1.17526 ++ .global __avr32_f64_mul
1.17527 ++ .type __avr32_f64_mul,@function
1.17528 ++__avr32_f64_mul:
1.17529 ++#else
1.17530 ++ .global __avr32_f64_mul_fast
1.17531 ++ .type __avr32_f64_mul_fast,@function
1.17532 ++__avr32_f64_mul_fast:
1.17533 ++#endif
1.17534 ++ or r12, r10, r11 << 1
1.17535 ++ breq __avr32_f64_mul_op1_zero
1.17536 ++
1.17537 ++#if defined(L_avr32_f64_mul)
1.17538 ++ pushm r4-r7, lr
1.17539 ++#else
1.17540 ++ stm --sp, r5,r6,r7,lr
1.17541 ++#endif
1.17542 ++
1.17543 ++#define AVR32_F64_MUL_OP1_INT_BITS 1
1.17544 ++#define AVR32_F64_MUL_OP2_INT_BITS 10
1.17545 ++#define AVR32_F64_MUL_RES_INT_BITS 11
1.17546 ++
1.17547 ++ /* op1 in {r11,r10}*/
1.17548 ++ /* op2 in {r9,r8}*/
1.17549 ++ eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
1.17550 ++
1.17551 ++ /* Unpack op1 to 1.63 format*/
1.17552 ++ /* exp: r7 */
1.17553 ++ /* sf: r11, r10 */
1.17554 ++ bfextu r7, r11, 20, 11 /* Extract exponent */
1.17555 ++
1.17556 ++ mov r5, 1
1.17557 ++
1.17558 ++ /* Check if normalization is needed */
1.17559 ++ breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */
1.17560 ++
1.17561 ++ lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */
1.17562 ++ or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1))
1.17563 ++ lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1)
1.17564 ++ bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */
1.17565 ++
1.17566 ++
1.17567 ++22:
1.17568 ++ /* Unpack op2 to 10.54 format */
1.17569 ++ /* exp: r6 */
1.17570 ++ /* sf: r9, r8 */
1.17571 ++ bfextu r6, r9, 20, 11 /* Extract exponent */
1.17572 ++
1.17573 ++ /* Check if normalization is needed */
1.17574 ++ breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */
1.17575 ++
1.17576 ++ lsl r8, 1 /* Extract mantissa, leave room for implicit bit */
1.17577 ++ rol r9
1.17578 ++ bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */
1.17579 ++
1.17580 ++23:
1.17581 ++
1.17582 ++ /* Check if any operands are NaN or INF */
1.17583 ++ cp r7, 0x7ff
1.17584 ++ breq __avr32_f64_mul_op_nan_or_inf /* Check op1 for NaN or Inf */
1.17585 ++ cp r6, 0x7ff
1.17586 ++ breq __avr32_f64_mul_op_nan_or_inf /* Check op2 for NaN or Inf */
1.17587 ++
1.17588 ++
1.17589 ++ /* Calculate new exponent in r12*/
1.17590 ++ add r12, r7, r6
1.17591 ++ sub r12, (1023-1)
1.17592 ++
1.17593 ++#if defined(L_avr32_f64_mul)
1.17594 ++ /* Do the multiplication.
1.17595 ++ Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */
1.17596 ++ mulu.d r4, r11, r8
1.17597 ++ macu.d r4, r10, r9
1.17598 ++ mulu.d r6, r10, r8
1.17599 ++ mulu.d r10, r11, r9
1.17600 ++ add r7, r4
1.17601 ++ adc r10, r10, r5
1.17602 ++ acr r11
1.17603 ++#else
1.17604 ++ /* Do the multiplication using approximate calculation. discard the al x bl
1.17605 ++ calculation.
1.17606 ++ Place result in [r11, r10, r7]. The result is in 11.85 format. */
1.17607 ++
1.17608 ++ /* Do the multiplication using approximate calculation.
1.17609 ++ Place result in r11, r10. Use r7, r6 as scratch registers */
1.17610 ++ mulu.d r6, r11, r8
1.17611 ++ macu.d r6, r10, r9
1.17612 ++ mulu.d r10, r11, r9
1.17613 ++ add r10, r7
1.17614 ++ acr r11
1.17615 ++#endif
1.17616 ++ /* Adjust exponent and mantissa */
1.17617 ++ /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */
1.17618 ++ /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */
1.17619 ++ /* In the first case, shift one pos to left.*/
1.17620 ++ bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1
1.17621 ++ breq 0f
1.17622 ++ lsl r7, 1
1.17623 ++ rol r10
1.17624 ++ rol r11
1.17625 ++ sub r12, 1
1.17626 ++0:
1.17627 ++ cp r12, 0
1.17628 ++ brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/
1.17629 ++
1.17630 ++ /* Check for Inf. */
1.17631 ++ cp.w r12, 0x7ff
1.17632 ++ brge __avr32_f64_mul_res_inf
1.17633 ++
1.17634 ++ /* Insert exponent. */
1.17635 ++ bfins r11, r12, 20, 11
1.17636 ++
1.17637 ++ /* Result was not subnormal. Perform rounding. */
1.17638 ++ /* For the fast version we discard the sticky bits and always round
1.17639 ++ the halfway case up. */
1.17640 ++24:
1.17641 ++#if defined(L_avr32_f64_mul)
1.17642 ++ or r6, r6, r10 << 31 /* Or in parity bit into stickybits */
1.17643 ++ or r7, r7, r6 >> 1 /* Or together sticky and still make the msb
1.17644 ++ of r7 represent the halfway bit. */
1.17645 ++ eorh r7, 0x8000 /* Toggle halfway bit. */
1.17646 ++ /* We should now round up by adding one for the following cases:
1.17647 ++
1.17648 ++ halfway sticky|parity round-up
1.17649 ++ 0 x no
1.17650 ++ 1 0 no
1.17651 ++ 1 1 yes
1.17652 ++
1.17653 ++ Since we have inverted the halfway bit we can use the satu instruction
1.17654 ++ by saturating to 1 bit to implement this.
1.17655 ++ */
1.17656 ++ satu r7 >> 0, 1
1.17657 ++#else
1.17658 ++ lsr r7, 31
1.17659 ++#endif
1.17660 ++ add r10, r7
1.17661 ++ acr r11
1.17662 ++
1.17663 ++ /* Insert sign bit*/
1.17664 ++ bld lr, 31
1.17665 ++ bst r11, 31
1.17666 ++
1.17667 ++ /* Return result in [r11,r10] */
1.17668 ++#if defined(L_avr32_f64_mul)
1.17669 ++ popm r4-r7, pc
1.17670 ++#else
1.17671 ++ ldm sp++, r5, r6, r7,pc
1.17672 ++#endif
1.17673 ++
1.17674 ++
1.17675 ++__avr32_f64_mul_op1_subnormal:
1.17676 ++ andh r11, 0x000f /* Remove sign bit and exponent */
1.17677 ++ clz r12, r10 /* Count leading zeros in lsw */
1.17678 ++ clz r6, r11 /* Count leading zeros in msw */
1.17679 ++ subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS
1.17680 ++ movcs r6, r12
1.17681 ++ subcc r6, AVR32_F64_MUL_OP1_INT_BITS
1.17682 ++ cp.w r6, 32
1.17683 ++ brge 0f
1.17684 ++
1.17685 ++ /* shifting involves both msw and lsw*/
1.17686 ++ rsub r12, r6, 32 /* shift mantissa */
1.17687 ++ lsl r11, r11, r6
1.17688 ++ lsr r12, r10, r12
1.17689 ++ or r11, r12
1.17690 ++ lsl r10, r10, r6
1.17691 ++ sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
1.17692 ++ sub r7, r6 /* adjust exponent */
1.17693 ++ rjmp 22b /* Finished */
1.17694 ++0:
1.17695 ++ /* msw is zero so only need to consider lsw */
1.17696 ++ lsl r11, r10, r6
1.17697 ++ breq __avr32_f64_mul_res_zero
1.17698 ++ mov r10, 0
1.17699 ++ sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
1.17700 ++ sub r7, r6 /* adjust exponent */
1.17701 ++ rjmp 22b
1.17702 ++
1.17703 ++
1.17704 ++__avr32_f64_mul_op2_subnormal:
1.17705 ++ andh r9, 0x000f /* Remove sign bit and exponent */
1.17706 ++ clz r12, r8 /* Count leading zeros in lsw */
1.17707 ++ clz r5, r9 /* Count leading zeros in msw */
1.17708 ++ subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS
1.17709 ++ movcs r5, r12
1.17710 ++ subcc r5, AVR32_F64_MUL_OP2_INT_BITS
1.17711 ++ cp.w r5, 32
1.17712 ++ brge 0f
1.17713 ++
1.17714 ++ /* shifting involves both msw and lsw*/
1.17715 ++ rsub r12, r5, 32 /* shift mantissa */
1.17716 ++ lsl r9, r9, r5
1.17717 ++ lsr r12, r8, r12
1.17718 ++ or r9, r12
1.17719 ++ lsl r8, r8, r5
1.17720 ++ sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
1.17721 ++ sub r6, r5 /* adjust exponent */
1.17722 ++ rjmp 23b /* Finished */
1.17723 ++0:
1.17724 ++ /* msw is zero so only need to consider lsw */
1.17725 ++ lsl r9, r8, r5
1.17726 ++ breq __avr32_f64_mul_res_zero
1.17727 ++ mov r8, 0
1.17728 ++ sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
1.17729 ++ sub r6, r5 /* adjust exponent */
1.17730 ++ rjmp 23b
1.17731 ++
1.17732 ++
1.17733 ++__avr32_f64_mul_op_nan_or_inf:
1.17734 ++ /* Same code for OP1 and OP2*/
1.17735 ++ /* Since we are here, at least one of the OPs were NaN or INF*/
1.17736 ++ andh r9, 0x000f /* Remove sign bit and exponent */
1.17737 ++ andh r11, 0x000f /* Remove sign bit and exponent */
1.17738 ++ /* Merge the regs in each operand to check for zero*/
1.17739 ++ or r11, r10 /* op1 */
1.17740 ++ or r9, r8 /* op2 */
1.17741 ++ /* Check if op1 is NaN or INF */
1.17742 ++ cp r7, 0x7ff
1.17743 ++ brne __avr32_f64_mul_op1_not_naninf
1.17744 ++ /* op1 was NaN or INF.*/
1.17745 ++ cp r11, 0
1.17746 ++ brne __avr32_f64_mul_res_nan /* op1 was NaN. Result will be NaN*/
1.17747 ++ /*op1 was INF. check if op2 is NaN or INF*/
1.17748 ++ cp r6, 0x7ff
1.17749 ++ brne __avr32_f64_mul_res_inf /*op1 was INF, op2 was neither NaN nor INF*/
1.17750 ++ /* op1 is INF, op2 is either NaN or INF*/
1.17751 ++ cp r9, 0
1.17752 ++ breq __avr32_f64_mul_res_inf /*op2 was also INF*/
1.17753 ++ rjmp __avr32_f64_mul_res_nan /*op2 was NaN*/
1.17754 ++
1.17755 ++__avr32_f64_mul_op1_not_naninf:
1.17756 ++ /* op1 was not NaN nor INF. Then op2 must be NaN or INF*/
1.17757 ++ cp r9, 0
1.17758 ++ breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/
1.17759 ++ rjmp __avr32_f64_mul_res_nan /*else return NaN*/
1.17760 ++
1.17761 ++__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */
1.17762 ++#if defined(L_avr32_f64_mul)
1.17763 ++ /* Check how much we must scale down the mantissa. */
1.17764 ++ neg r12
1.17765 ++ sub r12, -1 /* We no longer have an implicit bit. */
1.17766 ++ satu r12 >> 0, 6 /* Saturate shift amount to max 63. */
1.17767 ++ cp.w r12, 32
1.17768 ++ brge 0f
1.17769 ++ /* Shift amount <32 */
1.17770 ++ rsub r8, r12, 32
1.17771 ++ or r6, r7
1.17772 ++ lsr r7, r7, r12
1.17773 ++ lsl r9, r10, r8
1.17774 ++ or r7, r9
1.17775 ++ lsr r10, r10, r12
1.17776 ++ lsl r9, r11, r8
1.17777 ++ or r10, r9
1.17778 ++ lsr r11, r11, r12
1.17779 ++ rjmp 24b
1.17780 ++0:
1.17781 ++ /* Shift amount >=32 */
1.17782 ++ rsub r8, r12, 32
1.17783 ++ moveq r9, 0
1.17784 ++ breq 0f
1.17785 ++ lsl r9, r11, r8
1.17786 ++0:
1.17787 ++ or r6, r7
1.17788 ++ or r6, r6, r10 << 1
1.17789 ++ lsr r10, r10, r12
1.17790 ++ or r7, r9, r10
1.17791 ++ lsr r10, r11, r12
1.17792 ++ mov r11, 0
1.17793 ++ rjmp 24b
1.17794 ++#else
1.17795 ++ /* Flush to zero for the fast version. */
1.17796 ++ mov r11, lr /*Get correct sign*/
1.17797 ++ andh r11, 0x8000, COH
1.17798 ++ mov r10, 0
1.17799 ++ ldm sp++, r5, r6, r7,pc
1.17800 ++#endif
1.17801 ++
1.17802 ++__avr32_f64_mul_res_zero:/* Multiply result is zero. */
1.17803 ++ mov r11, lr /*Get correct sign*/
1.17804 ++ andh r11, 0x8000, COH
1.17805 ++ mov r10, 0
1.17806 ++#if defined(L_avr32_f64_mul)
1.17807 ++ popm r4-r7, pc
1.17808 ++#else
1.17809 ++ ldm sp++, r5, r6, r7,pc
1.17810 ++#endif
1.17811 ++
1.17812 ++__avr32_f64_mul_res_nan: /* Return NaN. */
1.17813 ++ mov r11, -1
1.17814 ++ mov r10, -1
1.17815 ++#if defined(L_avr32_f64_mul)
1.17816 ++ popm r4-r7, pc
1.17817 ++#else
1.17818 ++ ldm sp++, r5, r6, r7,pc
1.17819 ++#endif
1.17820 ++
1.17821 ++__avr32_f64_mul_res_inf: /* Return INF. */
1.17822 ++ mov r11, 0xfff00000
1.17823 ++ bld lr, 31
1.17824 ++ bst r11, 31
1.17825 ++ mov r10, 0
1.17826 ++#if defined(L_avr32_f64_mul)
1.17827 ++ popm r4-r7, pc
1.17828 ++#else
1.17829 ++ ldm sp++, r5, r6, r7,pc
1.17830 ++#endif
1.17831 ++
1.17832 ++__avr32_f64_mul_op1_zero:
1.17833 ++ /* Get sign */
1.17834 ++ eor r11, r11, r9
1.17835 ++ andh r11, 0x8000, COH
1.17836 ++ /* Check if op2 is Inf or NaN. */
1.17837 ++ bfextu r12, r9, 20, 11
1.17838 ++ cp.w r12, 0x7ff
1.17839 ++ retne r12 /* Return 0.0 */
1.17840 ++ /* Return NaN */
1.17841 ++ mov r10, -1
1.17842 ++ mov r11, -1
1.17843 ++ ret r12
1.17844 ++
1.17845 ++
1.17846 ++
1.17847 ++#endif
1.17848 ++
1.17849 ++
1.17850 ++#if defined(L_avr32_f64_addsub) || defined(L_avr32_f64_addsub_fast)
1.17851 ++ .align 2
1.17852 ++
1.17853 ++__avr32_f64_sub_from_add:
1.17854 ++ /* Switch sign on op2 */
1.17855 ++ eorh r9, 0x8000
1.17856 ++
1.17857 ++#if defined(L_avr32_f64_addsub_fast)
1.17858 ++ .global __avr32_f64_sub_fast
1.17859 ++ .type __avr32_f64_sub_fast,@function
1.17860 ++__avr32_f64_sub_fast:
1.17861 ++#else
1.17862 ++ .global __avr32_f64_sub
1.17863 ++ .type __avr32_f64_sub,@function
1.17864 ++__avr32_f64_sub:
1.17865 ++#endif
1.17866 ++
1.17867 ++ /* op1 in {r11,r10}*/
1.17868 ++ /* op2 in {r9,r8}*/
1.17869 ++
1.17870 ++#if defined(L_avr32_f64_addsub_fast)
1.17871 ++ /* If op2 is zero just return op1 */
1.17872 ++ or r12, r8, r9 << 1
1.17873 ++ reteq r12
1.17874 ++#endif
1.17875 ++
1.17876 ++ /* Check signs */
1.17877 ++ eor r12, r11, r9
1.17878 ++ /* Different signs, use addition. */
1.17879 ++ brmi __avr32_f64_add_from_sub
1.17880 ++
1.17881 ++ stm --sp, r5, r6, r7, lr
1.17882 ++
1.17883 ++ /* Get sign of op1 into r12 */
1.17884 ++ mov r12, r11
1.17885 ++ andh r12, 0x8000, COH
1.17886 ++
1.17887 ++ /* Remove sign from operands */
1.17888 ++ cbr r11, 31
1.17889 ++ cbr r9, 31
1.17890 ++
1.17891 ++ /* Put the largest number in [r11, r10]
1.17892 ++ and the smallest number in [r9, r8] */
1.17893 ++ cp r10, r8
1.17894 ++ cpc r11, r9
1.17895 ++ brhs 1f /* Skip swap if operands already correctly ordered*/
1.17896 ++ /* Operands were not correctly ordered, swap them*/
1.17897 ++ mov r7, r11
1.17898 ++ mov r11, r9
1.17899 ++ mov r9, r7
1.17900 ++ mov r7, r10
1.17901 ++ mov r10, r8
1.17902 ++ mov r8, r7
1.17903 ++ eorh r12, 0x8000 /* Invert sign in r12*/
1.17904 ++1:
1.17905 ++ /* Unpack largest operand - opH */
1.17906 ++ /* exp: r7 */
1.17907 ++ /* sf: r11, r10 */
1.17908 ++ lsr r7, r11, 20 /* Extract exponent */
1.17909 ++ lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
1.17910 ++ or r11, r11, r10>>21
1.17911 ++ lsl r10, 11
1.17912 ++ sbr r11, 31 /* Insert implicit bit */
1.17913 ++
1.17914 ++
1.17915 ++ /* Unpack smallest operand - opL */
1.17916 ++ /* exp: r6 */
1.17917 ++ /* sf: r9, r8 */
1.17918 ++ lsr r6, r9, 20 /* Extract exponent */
1.17919 ++ breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */
1.17920 ++ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
1.17921 ++ or r9, r9, r8>>21
1.17922 ++ lsl r8, 11
1.17923 ++ sbr r9, 31 /* Insert implicit bit */
1.17924 ++
1.17925 ++
1.17926 ++__avr32_f64_sub_opL_subnormal_done:
1.17927 ++ /* Check if opH is NaN or Inf. */
1.17928 ++ cp.w r7, 0x7ff
1.17929 ++ breq __avr32_f64_sub_opH_nan_or_inf
1.17930 ++
1.17931 ++ /* Get shift amount to scale mantissa of op2. */
1.17932 ++ rsub r6, r7
1.17933 ++ breq __avr32_f64_sub_shift_done /* No need to shift, exponents are equal*/
1.17934 ++
1.17935 ++ /* Scale mantissa [r9, r8] with amount [r6].
1.17936 ++ Uses scratch registers [r5] and [lr].
1.17937 ++ In IEEE mode:Must not forget the sticky bits we intend to shift out. */
1.17938 ++
1.17939 ++ rsub r5,r6,32 /* get (32 - shift count)
1.17940 ++ (if shift count > 32 we get a
1.17941 ++ negative value, but that will
1.17942 ++ work as well in the code below.) */
1.17943 ++
1.17944 ++ cp.w r6,32 /* handle shifts >= 32 separately */
1.17945 ++ brhs __avr32_f64_sub_longshift
1.17946 ++
1.17947 ++ /* small (<32) shift amount, both words are part of the shift
1.17948 ++ first remember whether part that is lost contains any 1 bits ... */
1.17949 ++ lsl lr,r8,r5 /* shift away bits that are part of
1.17950 ++ final mantissa. only part that goes
1.17951 ++ to lr are bits that will be lost */
1.17952 ++
1.17953 ++ /* ... and now to the actual shift */
1.17954 ++ lsl r5,r9,r5 /* get bits from msw destined for lsw*/
1.17955 ++ lsr r8,r8,r6 /* shift down lsw of mantissa */
1.17956 ++ lsr r9,r9,r6 /* shift down msw of mantissa */
1.17957 ++ or r8,r5 /* combine these bits with prepared lsw*/
1.17958 ++#if defined(L_avr32_f64_addsub)
1.17959 ++ cp.w lr,0 /* if any '1' bit in part we lost ...*/
1.17960 ++ srne lr
1.17961 ++ or r8, lr /* ... we need to set sticky bit*/
1.17962 ++#endif
1.17963 ++
1.17964 ++__avr32_f64_sub_shift_done:
1.17965 ++ /* Now subtract the mantissas. */
1.17966 ++ sub r10, r8
1.17967 ++ sbc r11, r11, r9
1.17968 ++
1.17969 ++ /* Normalize the exponent and mantissa pair stored in
1.17970 ++ [r11,r10] and exponent in [r7]. Needs two scratch registers [r6] and [lr]. */
1.17971 ++ clz r6,r11 /* Check if we have zeros in high bits */
1.17972 ++ breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */
1.17973 ++ brcs __avr32_f64_sub_longnormalize
1.17974 ++
1.17975 ++
1.17976 ++ /* shift amount is smaller than 32, and involves both msw and lsw*/
1.17977 ++ rsub lr,r6,32 /* shift mantissa */
1.17978 ++ lsl r11,r11,r6
1.17979 ++ lsr lr,r10,lr
1.17980 ++ or r11,lr
1.17981 ++ lsl r10,r10,r6
1.17982 ++
1.17983 ++ sub r7,r6 /* adjust exponent */
1.17984 ++ brle __avr32_f64_sub_subnormal_result
1.17985 ++__avr32_f64_sub_longnormalize_done:
1.17986 ++
1.17987 ++#if defined(L_avr32_f64_addsub)
1.17988 ++ /* Insert the bits we will remove from the mantissa r9[31:21] */
1.17989 ++ lsl r9, r10, (32 - 11)
1.17990 ++#else
1.17991 ++ /* Keep the last bit shifted out. */
1.17992 ++ bfextu r9, r10, 10, 1
1.17993 ++#endif
1.17994 ++
1.17995 ++ /* Pack final result*/
1.17996 ++ /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
1.17997 ++ /* Result in [r11,r10] */
1.17998 ++ /* Insert mantissa */
1.17999 ++ lsr r10, 11
1.18000 ++ or r10, r10, r11<<21
1.18001 ++ lsr r11, 11
1.18002 ++ /* Insert exponent and sign bit*/
1.18003 ++ bfins r11, r7, 20, 11
1.18004 ++ or r11, r12
1.18005 ++
1.18006 ++ /* Round */
1.18007 ++__avr32_f64_sub_round:
1.18008 ++#if defined(L_avr32_f64_addsub)
1.18009 ++ mov_imm r7, 0x80000000
1.18010 ++ bld r10, 0
1.18011 ++ subne r7, -1
1.18012 ++
1.18013 ++ cp.w r9, r7
1.18014 ++ srhs r9
1.18015 ++#endif
1.18016 ++ add r10, r9
1.18017 ++ acr r11
1.18018 ++
1.18019 ++ /* Return result in [r11,r10] */
1.18020 ++ ldm sp++, r5, r6, r7,pc
1.18021 ++
1.18022 ++
1.18023 ++
1.18024 ++__avr32_f64_sub_opL_subnormal:
1.18025 ++ /* Extract the mantissa */
1.18026 ++ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
1.18027 ++ or r9, r9, r8>>21
1.18028 ++ lsl r8, 11
1.18029 ++
1.18030 ++ /* Set exponent to 1 if we do not have a zero. */
1.18031 ++ or lr, r9, r8
1.18032 ++ movne r6,1
1.18033 ++
1.18034 ++ /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
1.18035 ++ rsub lr, r7, 0
1.18036 ++ moveq r7,1
1.18037 ++ bst r11, 31
1.18038 ++
1.18039 ++ /* Check if op1 is zero, if so set exponent to 0. */
1.18040 ++ or lr, r11, r10
1.18041 ++ moveq r7,0
1.18042 ++
1.18043 ++ rjmp __avr32_f64_sub_opL_subnormal_done
1.18044 ++
1.18045 ++__avr32_f64_sub_opH_nan_or_inf:
1.18046 ++ /* Check if opH is NaN, if so return NaN */
1.18047 ++ cbr r11, 31
1.18048 ++ or lr, r11, r10
1.18049 ++ brne __avr32_f64_sub_return_nan
1.18050 ++
1.18051 ++ /* opH is Inf. */
1.18052 ++ /* Check if opL is Inf. or NaN */
1.18053 ++ cp.w r6, 0x7ff
1.18054 ++ breq __avr32_f64_sub_return_nan
1.18055 ++ /* Return infinity with correct sign. */
1.18056 ++ or r11, r12, r7 << 20
1.18057 ++ ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
1.18058 ++__avr32_f64_sub_return_nan:
1.18059 ++ mov r10, -1 /* Generate NaN in r11, r10 */
1.18060 ++ mov r11, -1
1.18061 ++ ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
1.18062 ++
1.18063 ++
1.18064 ++__avr32_f64_sub_subnormal_result:
1.18065 ++#if defined(L_avr32_f64_addsub)
1.18066 ++ /* Check how much we must scale down the mantissa. */
1.18067 ++ neg r7
1.18068 ++ sub r7, -1 /* We no longer have an implicit bit. */
1.18069 ++ satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
1.18070 ++ cp.w r7, 32
1.18071 ++ brge 0f
1.18072 ++ /* Shift amount <32 */
1.18073 ++ rsub r8, r7, 32
1.18074 ++ lsl r9, r10, r8
1.18075 ++ srne r6
1.18076 ++ lsr r10, r10, r7
1.18077 ++ or r10, r6 /* Sticky bit from the
1.18078 ++ part that was shifted out. */
1.18079 ++ lsl r9, r11, r8
1.18080 ++ or r10, r10, r9
1.18081 ++ lsr r11, r10, r7
1.18082 ++ /* Set exponent */
1.18083 ++ mov r7, 0
1.18084 ++ rjmp __avr32_f64_sub_longnormalize_done
1.18085 ++0:
1.18086 ++ /* Shift amount >=32 */
1.18087 ++ rsub r8, r7, 64
1.18088 ++ lsl r9, r11, r8
1.18089 ++ or r9, r10
1.18090 ++ srne r6
1.18091 ++ lsr r10, r11, r7
1.18092 ++ or r10, r6 /* Sticky bit from the
1.18093 ++ part that was shifted out. */
1.18094 ++ mov r11, 0
1.18095 ++ /* Set exponent */
1.18096 ++ mov r7, 0
1.18097 ++ rjmp __avr32_f64_sub_longnormalize_done
1.18098 ++#else
1.18099 ++ /* Just flush subnormals to zero. */
1.18100 ++ mov r10, 0
1.18101 ++ mov r11, 0
1.18102 ++#endif
1.18103 ++ ldm sp++, r5, r6, r7, pc
1.18104 ++
1.18105 ++__avr32_f64_sub_longshift:
1.18106 ++ /* large (>=32) shift amount, only lsw will have bits left after shift.
1.18107 ++ note that shift operations will use ((shift count=r6) mod 32) so
1.18108 ++ we do not need to subtract 32 from shift count. */
1.18109 ++ /* Saturate the shift amount to 63. If the amount
1.18110 ++ is any larger op2 is insignificant. */
1.18111 ++ satu r6 >> 0, 6
1.18112 ++
1.18113 ++#if defined(L_avr32_f64_addsub)
1.18114 ++ /* first remember whether part that is lost contains any 1 bits ... */
1.18115 ++ moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */
1.18116 ++ breq 0f
1.18117 ++ lsl lr,r9,r5 /* save all lost bits from msw */
1.18118 ++ or lr,r8 /* also save lost bits (all) from lsw
1.18119 ++ now lr != 0 if we lose any bits */
1.18120 ++#endif
1.18121 ++0:
1.18122 ++ /* ... and now to the actual shift */
1.18123 ++ lsr r8,r9,r6 /* Move msw to lsw and shift. */
1.18124 ++ mov r9,0 /* clear msw */
1.18125 ++#if defined(L_avr32_f64_addsub)
1.18126 ++ cp.w lr,0 /* if any '1' bit in part we lost ...*/
1.18127 ++ srne lr
1.18128 ++ or r8, lr /* ... we need to set sticky bit*/
1.18129 ++#endif
1.18130 ++ rjmp __avr32_f64_sub_shift_done
1.18131 ++
1.18132 ++__avr32_f64_sub_longnormalize:
1.18133 ++ /* shift amount is greater than 32 */
1.18134 ++ clz r6,r10 /* shift mantissa */
1.18135 ++ /* If the resulting mantissa is zero the result is
1.18136 ++ zero so force exponent to zero. */
1.18137 ++ movcs r7, 0
1.18138 ++ movcs r6, 0
1.18139 ++ movcs r12, 0 /* Also clear sign bit. A zero result from subtraction
1.18140 ++ always is +0.0 */
1.18141 ++ subcc r6,-32
1.18142 ++ lsl r11,r10,r6
1.18143 ++ mov r10,0
1.18144 ++ sub r7,r6 /* adjust exponent */
1.18145 ++ brle __avr32_f64_sub_subnormal_result
1.18146 ++ rjmp __avr32_f64_sub_longnormalize_done
1.18147 ++
1.18148 ++
1.18149 ++
1.18150 ++ .align 2
1.18151 ++__avr32_f64_add_from_sub:
1.18152 ++ /* Switch sign on op2 */
1.18153 ++ eorh r9, 0x8000
1.18154 ++
1.18155 ++#if defined(L_avr32_f64_addsub_fast)
1.18156 ++ .global __avr32_f64_add_fast
1.18157 ++ .type __avr32_f64_add_fast,@function
1.18158 ++__avr32_f64_add_fast:
1.18159 ++#else
1.18160 ++ .global __avr32_f64_add
1.18161 ++ .type __avr32_f64_add,@function
1.18162 ++__avr32_f64_add:
1.18163 ++#endif
1.18164 ++
1.18165 ++ /* op1 in {r11,r10}*/
1.18166 ++ /* op2 in {r9,r8}*/
1.18167 ++
1.18168 ++#if defined(L_avr32_f64_addsub_fast)
1.18169 ++ /* If op2 is zero just return op1 */
1.18170 ++ or r12, r8, r9 << 1
1.18171 ++ reteq r12
1.18172 ++#endif
1.18173 ++
1.18174 ++ /* Check signs */
1.18175 ++ eor r12, r11, r9
1.18176 ++ /* Different signs, use subtraction. */
1.18177 ++ brmi __avr32_f64_sub_from_add
1.18178 ++
1.18179 ++ stm --sp, r5, r6, r7, lr
1.18180 ++
1.18181 ++ /* Get sign of op1 into r12 */
1.18182 ++ mov r12, r11
1.18183 ++ andh r12, 0x8000, COH
1.18184 ++
1.18185 ++ /* Remove sign from operands */
1.18186 ++ cbr r11, 31
1.18187 ++ cbr r9, 31
1.18188 ++
1.18189 ++ /* Put the number with the largest exponent in [r11, r10]
1.18190 ++ and the number with the smallest exponent in [r9, r8] */
1.18191 ++ cp r11, r9
1.18192 ++ brhs 1f /* Skip swap if operands already correctly ordered */
1.18193 ++ /* Operands were not correctly ordered, swap them */
1.18194 ++ mov r7, r11
1.18195 ++ mov r11, r9
1.18196 ++ mov r9, r7
1.18197 ++ mov r7, r10
1.18198 ++ mov r10, r8
1.18199 ++ mov r8, r7
1.18200 ++1:
1.18201 ++ mov lr, 0 /* Set sticky bits to zero */
1.18202 ++ /* Unpack largest operand - opH */
1.18203 ++ /* exp: r7 */
1.18204 ++ /* sf: r11, r10 */
1.18205 ++ bfextu R7, R11, 20, 11 /* Extract exponent */
1.18206 ++ bfextu r11, r11, 0, 20 /* Extract mantissa */
1.18207 ++ sbr r11, 20 /* Insert implicit bit */
1.18208 ++
1.18209 ++ /* Unpack smallest operand - opL */
1.18210 ++ /* exp: r6 */
1.18211 ++ /* sf: r9, r8 */
1.18212 ++ bfextu R6, R9, 20, 11 /* Extract exponent */
1.18213 ++ breq __avr32_f64_add_op2_subnormal
1.18214 ++ bfextu r9, r9, 0, 20 /* Extract mantissa */
1.18215 ++ sbr r9, 20 /* Insert implicit bit */
1.18216 ++
1.18217 ++2:
1.18218 ++ /* Check if opH is NaN or Inf. */
1.18219 ++ cp.w r7, 0x7ff
1.18220 ++ breq __avr32_f64_add_opH_nan_or_inf
1.18221 ++
1.18222 ++ /* Get shift amount to scale mantissa of op2. */
1.18223 ++ rsub r6, r7
1.18224 ++ breq __avr32_f64_add_shift_done /* No need to shift, exponents are equal*/
1.18225 ++
1.18226 ++ /* Scale mantissa [r9, r8] with amount [r6].
1.18227 ++ Uses scratch registers [r5] and [lr].
1.18228 ++ In IEEE mode:Must not forget the sticky bits we intend to shift out. */
1.18229 ++ rsub r5,r6,32 /* get (32 - shift count)
1.18230 ++ (if shift count > 32 we get a
1.18231 ++ negative value, but that will
1.18232 ++ work as well in the code below.) */
1.18233 ++
1.18234 ++ cp.w r6,32 /* handle shifts >= 32 separately */
1.18235 ++ brhs __avr32_f64_add_longshift
1.18236 ++
1.18237 ++ /* small (<32) shift amount, both words are part of the shift
1.18238 ++ first remember whether part that is lost contains any 1 bits ... */
1.18239 ++ lsl lr,r8,r5 /* shift away bits that are part of
1.18240 ++ final mantissa. only part that goes
1.18241 ++ to lr are bits that will be lost */
1.18242 ++
1.18243 ++ /* ... and now to the actual shift */
1.18244 ++ lsl r5,r9,r5 /* get bits from msw destined for lsw*/
1.18245 ++ lsr r8,r8,r6 /* shift down lsw of mantissa */
1.18246 ++ lsr r9,r9,r6 /* shift down msw of mantissa */
1.18247 ++ or r8,r5 /* combine these bits with prepared lsw*/
1.18248 ++
1.18249 ++__avr32_f64_add_shift_done:
1.18250 ++ /* Now add the mantissas. */
1.18251 ++ add r10, r8
1.18252 ++ adc r11, r11, r9
1.18253 ++
1.18254 ++ /* Check if we overflowed. */
1.18255 ++ bld r11, 21
1.18256 ++ breq __avr32_f64_add_res_of:
1.18257 ++
1.18258 ++__avr32_f64_add_res_of_done:
1.18259 ++
1.18260 ++ /* Pack final result*/
1.18261 ++ /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
1.18262 ++ /* Result in [r11,r10] */
1.18263 ++ /* Insert exponent and sign bit*/
1.18264 ++ bfins r11, r7, 20, 11
1.18265 ++ or r11, r12
1.18266 ++
1.18267 ++ /* Round */
1.18268 ++__avr32_f64_add_round:
1.18269 ++#if defined(L_avr32_f64_addsub)
1.18270 ++ bfextu r12, r10, 0, 1 /* Extract parity bit.*/
1.18271 ++ or lr, r12 /* or it together with the sticky bits. */
1.18272 ++ eorh lr, 0x8000 /* Toggle round bit. */
1.18273 ++ /* We should now round up by adding one for the following cases:
1.18274 ++
1.18275 ++ halfway sticky|parity round-up
1.18276 ++ 0 x no
1.18277 ++ 1 0 no
1.18278 ++ 1 1 yes
1.18279 ++
1.18280 ++ Since we have inverted the halfway bit we can use the satu instruction
1.18281 ++ by saturating to 1 bit to implement this.
1.18282 ++ */
1.18283 ++ satu lr >> 0, 1
1.18284 ++#else
1.18285 ++ lsr lr, 31
1.18286 ++#endif
1.18287 ++ add r10, lr
1.18288 ++ acr r11
1.18289 ++
1.18290 ++ /* Return result in [r11,r10] */
1.18291 ++ ldm sp++, r5, r6, r7,pc
1.18292 ++
1.18293 ++
1.18294 ++__avr32_f64_add_opH_nan_or_inf:
1.18295 ++ /* Check if opH is NaN, if so return NaN */
1.18296 ++ cbr r11, 20
1.18297 ++ or lr, r11, r10
1.18298 ++ brne __avr32_f64_add_return_nan
1.18299 ++
1.18300 ++ /* opH is Inf. */
1.18301 ++ /* Check if opL is Inf. or NaN */
1.18302 ++ cp.w r6, 0x7ff
1.18303 ++ breq __avr32_f64_add_opL_nan_or_inf
1.18304 ++ ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
1.18305 ++__avr32_f64_add_opL_nan_or_inf:
1.18306 ++ cbr r9, 20
1.18307 ++ or lr, r9, r8
1.18308 ++ brne __avr32_f64_add_return_nan
1.18309 ++ mov r10, 0 /* Generate Inf in r11, r10 */
1.18310 ++ mov_imm r11, 0x7ff00000
1.18311 ++ ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
1.18312 ++__avr32_f64_add_return_nan:
1.18313 ++ mov r10, -1 /* Generate NaN in r11, r10 */
1.18314 ++ mov r11, -1
1.18315 ++ ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
1.18316 ++
1.18317 ++
1.18318 ++__avr32_f64_add_longshift:
1.18319 ++ /* large (>=32) shift amount, only lsw will have bits left after shift.
1.18320 ++ note that shift operations will use ((shift count=r6) mod 32) so
1.18321 ++ we do not need to subtract 32 from shift count. */
1.18322 ++ /* Saturate the shift amount to 63. If the amount
1.18323 ++ is any larger op2 is insignificant. */
1.18324 ++ satu r6 >> 0, 6
1.18325 ++ /* If shift amount is 32 there are no bits from the msw that are lost. */
1.18326 ++ moveq lr, r8
1.18327 ++ breq 0f
1.18328 ++ /* first remember whether part that is lost contains any 1 bits ... */
1.18329 ++ lsl lr,r9,r5 /* save all lost bits from msw */
1.18330 ++#if defined(L_avr32_f64_addsub)
1.18331 ++ cp.w r8, 0
1.18332 ++ srne r8
1.18333 ++ or lr,r8 /* also save lost bits (all) from lsw
1.18334 ++ now lr != 0 if we lose any bits */
1.18335 ++#endif
1.18336 ++0:
1.18337 ++ /* ... and now to the actual shift */
1.18338 ++ lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/
1.18339 ++ mov r9,0 /* clear msw */
1.18340 ++ rjmp __avr32_f64_add_shift_done
1.18341 ++
1.18342 ++__avr32_f64_add_res_of:
1.18343 ++ /* We overflowed. Scale down mantissa by shifting right one position. */
1.18344 ++ or lr, lr, lr << 1 /* Remember stickybits*/
1.18345 ++ lsr r11, 1
1.18346 ++ ror r10
1.18347 ++ ror lr
1.18348 ++ sub r7, -1 /* Increment exponent */
1.18349 ++
1.18350 ++ /* Clear mantissa to set result to Inf if the exponent is 0x7ff. */
1.18351 ++ cp.w r7, 0x7ff
1.18352 ++ moveq r10, 0
1.18353 ++ moveq r11, 0
1.18354 ++ moveq lr, 0
1.18355 ++ rjmp __avr32_f64_add_res_of_done
1.18356 ++
1.18357 ++__avr32_f64_add_op2_subnormal:
1.18358 ++ /* Set exponent to 1 */
1.18359 ++ mov r6, 1
1.18360 ++
1.18361 ++ /* Check if op2 is also subnormal. */
1.18362 ++ cp.w r7, 0
1.18363 ++ brne 2b
1.18364 ++
1.18365 ++ cbr r11, 20
1.18366 ++ /* Both operands are subnormal. Just add the mantissas
1.18367 ++ and the exponent will automatically be set to 1 if
1.18368 ++ we overflow into a normal number. */
1.18369 ++ add r10, r8
1.18370 ++ adc r11, r11, r9
1.18371 ++
1.18372 ++ /* Add sign bit */
1.18373 ++ or r11, r12
1.18374 ++
1.18375 ++ /* Return result in [r11,r10] */
1.18376 ++ ldm sp++, r5, r6, r7,pc
1.18377 ++
1.18378 ++
1.18379 ++
1.18380 ++#endif
1.18381 ++
1.18382 ++#ifdef L_avr32_f64_to_u32
1.18383 ++ /* This goes into L_fixdfsi */
1.18384 ++#endif
1.18385 ++
1.18386 ++
1.18387 ++#ifdef L_avr32_f64_to_s32
1.18388 ++ .global __avr32_f64_to_u32
1.18389 ++ .type __avr32_f64_to_u32,@function
1.18390 ++__avr32_f64_to_u32:
1.18391 ++ cp.w r11, 0
1.18392 ++ retmi 0 /* Negative returns 0 */
1.18393 ++
1.18394 ++ /* Fallthrough to df to signed si conversion */
1.18395 ++ .global __avr32_f64_to_s32
1.18396 ++ .type __avr32_f64_to_s32,@function
1.18397 ++__avr32_f64_to_s32:
1.18398 ++ lsl r12,r11,1
1.18399 ++ lsr r12,21 /* extract exponent*/
1.18400 ++ sub r12,1023 /* convert to unbiased exponent.*/
1.18401 ++ retlo 0 /* too small exponent implies zero. */
1.18402 ++
1.18403 ++1:
1.18404 ++ rsub r12,r12,31 /* shift count = 31 - exponent */
1.18405 ++ mov r9,r11 /* save sign for later...*/
1.18406 ++ lsl r11,11 /* remove exponent and sign*/
1.18407 ++ sbr r11,31 /* add implicit bit*/
1.18408 ++ or r11,r11,r10>>21 /* get rest of bits from lsw of double */
1.18409 ++ lsr r11,r11,r12 /* shift down mantissa to final place */
1.18410 ++ lsl r9,1 /* sign -> carry */
1.18411 ++ retcc r11 /* if positive, we are done */
1.18412 ++ neg r11 /* if negative float, negate result */
1.18413 ++ ret r11
1.18414 ++
1.18415 ++#endif /* L_fixdfsi*/
1.18416 ++
1.18417 ++#ifdef L_avr32_f64_to_u64
1.18418 ++ /* Actual function is in L_fixdfdi */
1.18419 ++#endif
1.18420 ++
1.18421 ++#ifdef L_avr32_f64_to_s64
1.18422 ++ .global __avr32_f64_to_u64
1.18423 ++ .type __avr32_f64_to_u64,@function
1.18424 ++__avr32_f64_to_u64:
1.18425 ++ cp.w r11,0
1.18426 ++ /* Negative numbers return zero */
1.18427 ++ movmi r10, 0
1.18428 ++ movmi r11, 0
1.18429 ++ retmi r11
1.18430 ++
1.18431 ++
1.18432 ++
1.18433 ++ /* Fallthrough */
1.18434 ++ .global __avr32_f64_to_s64
1.18435 ++ .type __avr32_f64_to_s64,@function
1.18436 ++__avr32_f64_to_s64:
1.18437 ++ lsl r9,r11,1
1.18438 ++ lsr r9,21 /* get exponent*/
1.18439 ++ sub r9,1023 /* convert to correct range*/
1.18440 ++ /* Return zero if exponent too small */
1.18441 ++ movlo r10, 0
1.18442 ++ movlo r11, 0
1.18443 ++ retlo r11
1.18444 ++
1.18445 ++ mov r8,r11 /* save sign for later...*/
1.18446 ++1:
1.18447 ++ lsl r11,11 /* remove exponent */
1.18448 ++ sbr r11,31 /* add implicit bit*/
1.18449 ++ or r11,r11,r10>>21 /* get rest of bits from lsw of double*/
1.18450 ++ lsl r10,11 /* align lsw correctly as well */
1.18451 ++ rsub r9,r9,63 /* shift count = 63 - exponent */
1.18452 ++ breq 1f
1.18453 ++
1.18454 ++ cp.w r9,32 /* is shift count more than one reg? */
1.18455 ++ brhs 0f
1.18456 ++
1.18457 ++ mov r12,r11 /* save msw */
1.18458 ++ lsr r10,r10,r9 /* small shift count, shift down lsw */
1.18459 ++ lsr r11,r11,r9 /* small shift count, shift down msw */
1.18460 ++ rsub r9,r9,32 /* get 32-size of shifted out tail */
1.18461 ++ lsl r12,r12,r9 /* align part to move from msw to lsw */
1.18462 ++ or r10,r12 /* combine to get new lsw */
1.18463 ++ rjmp 1f
1.18464 ++
1.18465 ++0:
1.18466 ++ lsr r10,r11,r9 /* large shift count,only lsw get bits
1.18467 ++ note that shift count is modulo 32*/
1.18468 ++ mov r11,0 /* msw will be 0 */
1.18469 ++
1.18470 ++1:
1.18471 ++ lsl r8,1 /* sign -> carry */
1.18472 ++ retcc r11 /* if positive, we are done */
1.18473 ++
1.18474 ++ neg r11 /* if negative float, negate result */
1.18475 ++ neg r10
1.18476 ++ scr r11
1.18477 ++ ret r11
1.18478 ++
1.18479 ++#endif
1.18480 ++
1.18481 ++#ifdef L_avr32_u32_to_f64
1.18482 ++ /* Code located in L_floatsidf */
1.18483 ++#endif
1.18484 ++
1.18485 ++#ifdef L_avr32_s32_to_f64
1.18486 ++ .global __avr32_u32_to_f64
1.18487 ++ .type __avr32_u32_to_f64,@function
1.18488 ++__avr32_u32_to_f64:
1.18489 ++ sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */
1.18490 ++ mov r12, 0 /* always positive */
1.18491 ++ rjmp 0f /* Jump to common code for floatsidf */
1.18492 ++
1.18493 ++ .global __avr32_s32_to_f64
1.18494 ++ .type __avr32_s32_to_f64,@function
1.18495 ++__avr32_s32_to_f64:
1.18496 ++ mov r11, r12 /* Keep original value in r12 for sign */
1.18497 ++	abs	r11	/* Absolute value of r12 */
1.18498 ++0:
1.18499 ++ mov r10,0 /* let remaining bits be zero */
1.18500 ++ reteq r11 /* zero long will return zero float */
1.18501 ++
1.18502 ++ pushm lr
1.18503 ++ mov r9,31+1023 /* set exponent */
1.18504 ++
1.18505 ++ normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
1.18506 ++
1.18507 ++ /* Check if a subnormal result was created */
1.18508 ++ cp.w r9, 0
1.18509 ++ brgt 0f
1.18510 ++
1.18511 ++ adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
1.18512 ++ popm pc
1.18513 ++0:
1.18514 ++
1.18515 ++ /* Round result */
1.18516 ++ round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
1.18517 ++ cp.w r9,0x7ff
1.18518 ++ brlt 0f
1.18519 ++ /*Return infinity */
1.18520 ++ mov r10, 0
1.18521 ++ mov_imm r11, 0xffe00000
1.18522 ++ rjmp __floatsidf_return_op1
1.18523 ++
1.18524 ++0:
1.18525 ++
1.18526 ++ /* Pack */
1.18527 ++ pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
1.18528 ++__floatsidf_return_op1:
1.18529 ++ lsl r12,1 /* shift in sign bit */
1.18530 ++ ror r11
1.18531 ++
1.18532 ++ popm pc
1.18533 ++#endif
1.18534 ++
1.18535 ++
1.18536 ++#ifdef L_avr32_f32_cmp_eq
1.18537 ++ .global __avr32_f32_cmp_eq
1.18538 ++ .type __avr32_f32_cmp_eq,@function
1.18539 ++__avr32_f32_cmp_eq:
1.18540 ++ cp.w r12, r11
1.18541 ++ breq 0f
1.18542 ++ /* If not equal check for +/-0 */
1.18543 ++ /* Or together the two values and shift out the sign bit.
1.18544 ++ If the result is zero, then the two values are both zero. */
1.18545 ++ or r12, r11
1.18546 ++ lsl r12, 1
1.18547 ++ reteq 1
1.18548 ++ ret 0
1.18549 ++0:
1.18550 ++ /* Numbers were equal. Check for NaN or Inf */
1.18551 ++ mov_imm r11, 0xff000000
1.18552 ++ lsl r12, 1
1.18553 ++ cp.w r12, r11
1.18554 ++ retls 1 /* 0 if NaN, 1 otherwise */
1.18555 ++ ret 0
1.18556 ++#endif
1.18557 ++
1.18558 ++#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
1.18559 ++#ifdef L_avr32_f32_cmp_ge
1.18560 ++ .global __avr32_f32_cmp_ge
1.18561 ++ .type __avr32_f32_cmp_ge,@function
1.18562 ++__avr32_f32_cmp_ge:
1.18563 ++#endif
1.18564 ++#ifdef L_avr32_f32_cmp_lt
1.18565 ++ .global __avr32_f32_cmp_lt
1.18566 ++ .type __avr32_f32_cmp_lt,@function
1.18567 ++__avr32_f32_cmp_lt:
1.18568 ++#endif
1.18569 ++ lsl r10, r12, 1 /* Remove sign bits */
1.18570 ++ lsl r9, r11, 1
1.18571 ++ subfeq r10, 0
1.18572 ++#ifdef L_avr32_f32_cmp_ge
1.18573 ++	reteq	1	/* Both numbers are zero. Return true. */
1.18574 ++#endif
1.18575 ++#ifdef L_avr32_f32_cmp_lt
1.18576 ++	reteq	0	/* Both numbers are zero. Return false. */
1.18577 ++#endif
1.18578 ++ mov_imm r8, 0xff000000
1.18579 ++ cp.w r10, r8
1.18580 ++ rethi 0 /* Op0 is NaN */
1.18581 ++ cp.w r9, r8
1.18582 ++ rethi 0 /* Op1 is Nan */
1.18583 ++
1.18584 ++ eor r8, r11, r12
1.18585 ++ bld r12, 31
1.18586 ++#ifdef L_avr32_f32_cmp_ge
1.18587 ++ srcc r8 /* Set result to true if op0 is positive*/
1.18588 ++#endif
1.18589 ++#ifdef L_avr32_f32_cmp_lt
1.18590 ++ srcs r8 /* Set result to true if op0 is negative*/
1.18591 ++#endif
1.18592 ++ retmi r8 /* Return if signs are different */
1.18593 ++ brcs 0f /* Both signs negative? */
1.18594 ++
1.18595 ++ /* Both signs positive */
1.18596 ++ cp.w r12, r11
1.18597 ++#ifdef L_avr32_f32_cmp_ge
1.18598 ++ reths 1
1.18599 ++ retlo 0
1.18600 ++#endif
1.18601 ++#ifdef L_avr32_f32_cmp_lt
1.18602 ++ reths 0
1.18603 ++ retlo 1
1.18604 ++#endif
1.18605 ++0:
1.18606 ++ /* Both signs negative */
1.18607 ++ cp.w r11, r12
1.18608 ++#ifdef L_avr32_f32_cmp_ge
1.18609 ++ reths 1
1.18610 ++ retlo 0
1.18611 ++#endif
1.18612 ++#ifdef L_avr32_f32_cmp_lt
1.18613 ++ reths 0
1.18614 ++ retlo 1
1.18615 ++#endif
1.18616 ++#endif
1.18617 ++
1.18618 ++
1.18619 ++#ifdef L_avr32_f64_cmp_eq
1.18620 ++ .global __avr32_f64_cmp_eq
1.18621 ++ .type __avr32_f64_cmp_eq,@function
1.18622 ++__avr32_f64_cmp_eq:
1.18623 ++ cp.w r10,r8
1.18624 ++ cpc r11,r9
1.18625 ++ breq 0f
1.18626 ++
1.18627 ++ /* Args were not equal*/
1.18628 ++ /* Both args could be zero with different sign bits */
1.18629 ++ lsl r11,1 /* get rid of sign bits */
1.18630 ++ lsl r9,1
1.18631 ++ or r11,r10 /* Check if all bits are zero */
1.18632 ++ or r11,r9
1.18633 ++ or r11,r8
1.18634 ++ reteq 1 /* If all zeros the arguments are equal
1.18635 ++ so return 1 else return 0 */
1.18636 ++ ret 0
1.18637 ++0:
1.18638 ++ /* check for NaN */
1.18639 ++ lsl r11,1
1.18640 ++ mov_imm r12, 0xffe00000
1.18641 ++ cp.w r10,0
1.18642 ++ cpc r11,r12 /* check if nan or inf */
1.18643 ++ retls 1 /* If Arg is NaN return 0 else 1*/
1.18644 ++ ret 0 /* Return */
1.18645 ++
1.18646 ++#endif
1.18647 ++
1.18648 ++
1.18649 ++#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
1.18650 ++
1.18651 ++#ifdef L_avr32_f64_cmp_ge
1.18652 ++ .global __avr32_f64_cmp_ge
1.18653 ++ .type __avr32_f64_cmp_ge,@function
1.18654 ++__avr32_f64_cmp_ge:
1.18655 ++#endif
1.18656 ++#ifdef L_avr32_f64_cmp_lt
1.18657 ++ .global __avr32_f64_cmp_lt
1.18658 ++ .type __avr32_f64_cmp_lt,@function
1.18659 ++__avr32_f64_cmp_lt:
1.18660 ++#endif
1.18661 ++
1.18662 ++ /* compare magnitude of op1 and op2 */
1.18663 ++ lsl r11,1 /* Remove sign bit of op1 */
1.18664 ++ srcs r12 /* Sign op1 to lsb of r12*/
1.18665 ++ subfeq r10, 0
1.18666 ++ breq 3f /* op1 zero */
1.18667 ++ lsl r9,1 /* Remove sign bit of op2 */
1.18668 ++	rol	r12	/* Sign op2 to lsb of r12, sign bit op1 to bit 1 of r12*/
1.18669 ++
1.18670 ++
1.18671 ++ /* Check for Nan */
1.18672 ++ pushm lr
1.18673 ++ mov_imm lr, 0xffe00000
1.18674 ++ cp.w r10,0
1.18675 ++ cpc r11,lr
1.18676 ++ brhi 0f /* We have NaN */
1.18677 ++ cp.w r8,0
1.18678 ++ cpc r9,lr
1.18679 ++ brhi 0f /* We have NaN */
1.18680 ++ popm lr
1.18681 ++
1.18682 ++ cp.w r12,3 /* both operands negative ?*/
1.18683 ++ breq 1f
1.18684 ++
1.18685 ++ cp.w r12,1 /* both operands positive? */
1.18686 ++ brlo 2f
1.18687 ++
1.18688 ++ /* Different signs. If sign of op1 is negative the difference
1.18689 ++ between op1 and op2 will always be negative, and if op1 is
1.18690 ++ positive the difference will always be positive */
1.18691 ++#ifdef L_avr32_f64_cmp_ge
1.18692 ++ reteq 1
1.18693 ++ retne 0
1.18694 ++#endif
1.18695 ++#ifdef L_avr32_f64_cmp_lt
1.18696 ++ reteq 0
1.18697 ++ retne 1
1.18698 ++#endif
1.18699 ++
1.18700 ++2:
1.18701 ++ /* Both operands positive. Just compute the difference */
1.18702 ++ cp.w r10,r8
1.18703 ++ cpc r11,r9
1.18704 ++#ifdef L_avr32_f64_cmp_ge
1.18705 ++ reths 1
1.18706 ++ retlo 0
1.18707 ++#endif
1.18708 ++#ifdef L_avr32_f64_cmp_lt
1.18709 ++ reths 0
1.18710 ++ retlo 1
1.18711 ++#endif
1.18712 ++
1.18713 ++1:
1.18714 ++ /* Both operands negative. Compute the difference with operands switched */
1.18715 ++ cp r8,r10
1.18716 ++ cpc r9,r11
1.18717 ++#ifdef L_avr32_f64_cmp_ge
1.18718 ++ reths 1
1.18719 ++ retlo 0
1.18720 ++#endif
1.18721 ++#ifdef L_avr32_f64_cmp_lt
1.18722 ++ reths 0
1.18723 ++ retlo 1
1.18724 ++#endif
1.18725 ++
1.18726 ++0:
1.18727 ++ popm pc, r12=0
1.18728 ++#endif
1.18729 ++
1.18730 ++3:
1.18731 ++ lsl r9,1 /* Remove sign bit of op1 */
1.18732 ++#ifdef L_avr32_f64_cmp_ge
1.18733 ++ srcs r12 /* If op2 is negative then op1 >= op2. */
1.18734 ++#endif
1.18735 ++#ifdef L_avr32_f64_cmp_lt
1.18736 ++	srcc	r12	/* If op2 is positive then op1 <= op2. */
1.18737 ++#endif
1.18738 ++ subfeq r8, 0
1.18739 ++#ifdef L_avr32_f64_cmp_ge
1.18740 ++ reteq 1 /* Both operands are zero. Return true. */
1.18741 ++#endif
1.18742 ++#ifdef L_avr32_f64_cmp_lt
1.18743 ++ reteq 0 /* Both operands are zero. Return false. */
1.18744 ++#endif
1.18745 ++ ret r12
1.18746 ++
1.18747 ++
1.18748 ++#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
1.18749 ++ .align 2
1.18750 ++
1.18751 ++#if defined(L_avr32_f64_div_fast)
1.18752 ++ .global __avr32_f64_div_fast
1.18753 ++ .type __avr32_f64_div_fast,@function
1.18754 ++__avr32_f64_div_fast:
1.18755 ++#else
1.18756 ++ .global __avr32_f64_div
1.18757 ++ .type __avr32_f64_div,@function
1.18758 ++__avr32_f64_div:
1.18759 ++#endif
1.18760 ++ stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr
1.18761 ++ /* op1 in {r11,r10}*/
1.18762 ++ /* op2 in {r9,r8}*/
1.18763 ++ eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
1.18764 ++
1.18765 ++
1.18766 ++ /* Unpack op1 to 2.62 format*/
1.18767 ++ /* exp: r7 */
1.18768 ++ /* sf: r11, r10 */
1.18769 ++ lsr r7, r11, 20 /* Extract exponent */
1.18770 ++
1.18771 ++ lsl r11, 9 /* Extract mantissa, leave room for implicit bit */
1.18772 ++ or r11, r11, r10>>23
1.18773 ++ lsl r10, 9
1.18774 ++ sbr r11, 29 /* Insert implicit bit */
1.18775 ++ andh r11, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
1.18776 ++
1.18777 ++ cbr r7, 11 /* Clear sign bit */
1.18778 ++ /* Check if normalization is needed */
1.18779 ++ breq 11f /*If number is subnormal, normalize it */
1.18780 ++22:
1.18781 ++ cp r7, 0x7ff
1.18782 ++ brge 2f /* Check op1 for NaN or Inf */
1.18783 ++
1.18784 ++ /* Unpack op2 to 2.62 format*/
1.18785 ++ /* exp: r6 */
1.18786 ++ /* sf: r9, r8 */
1.18787 ++ lsr r6, r9, 20 /* Extract exponent */
1.18788 ++
1.18789 ++ lsl r9, 9 /* Extract mantissa, leave room for implicit bit */
1.18790 ++ or r9, r9, r8>>23
1.18791 ++ lsl r8, 9
1.18792 ++ sbr r9, 29 /* Insert implicit bit */
1.18793 ++ andh r9, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
1.18794 ++
1.18795 ++ cbr r6, 11 /* Clear sign bit */
1.18796 ++ /* Check if normalization is needed */
1.18797 ++ breq 13f /*If number is subnormal, normalize it */
1.18798 ++23:
1.18799 ++ cp r6, 0x7ff
1.18800 ++ brge 3f /* Check op2 for NaN or Inf */
1.18801 ++
1.18802 ++ /* Calculate new exponent */
1.18803 ++ sub r7, r6
1.18804 ++ sub r7,-1023
1.18805 ++
1.18806 ++ /* Divide */
1.18807 ++ /* Approximating 1/d with the following recurrence: */
1.18808 ++ /* R[j+1] = R[j]*(2-R[j]*d) */
1.18809 ++ /* Using 2.62 format */
1.18810 ++ /* TWO: r12 */
1.18811 ++ /* d = op2 = divisor (2.62 format): r9,r8 */
1.18812 ++ /* Multiply result : r5, r4 */
1.18813 ++ /* Initial guess : r3, r2 */
1.18814 ++ /* New approximations : r3, r2 */
1.18815 ++ /* op1 = Dividend (2.62 format) : r11, r10 */
1.18816 ++
1.18817 ++ mov_imm r12, 0x80000000
1.18818 ++
1.18819 ++ /* Load initial guess, using look-up table */
1.18820 ++ /* Initial guess is of format 01.XY, where XY is constructed as follows: */
1.18821 ++ /* Let d be of following format: 00.1xy....., then XY=~xy */
1.18822 ++ /* For d=00.100 = 0,5 -> initial guess=01.11 = 1,75 */
1.18823 ++	/* For d=00.101 = 0,625 -> initial guess=01.10 = 1,5 */
1.18824 ++	/* For d=00.110 = 0,75 -> initial guess=01.01 = 1,25 */
1.18825 ++	/* For d=00.111 = 0,875 -> initial guess=01.00 = 1,0 */
1.18826 ++ /* r2 is also part of the reg pair forming initial guess, but it*/
1.18827 ++ /* is kept uninitialized to save one cycle since it has so low significance*/
1.18828 ++
1.18829 ++ lsr r3, r12, 1
1.18830 ++ bfextu r4, r9, 27, 2
1.18831 ++ com r4
1.18832 ++ bfins r3, r4, 28, 2
1.18833 ++
1.18834 ++ /* First approximation */
1.18835 ++ /* Approximating to 32 bits */
1.18836 ++ /* r5 = R[j]*d */
1.18837 ++ mulu.d r4, r3, r9
1.18838 ++ /* r5 = 2-R[j]*d */
1.18839 ++ sub r5, r12, r5<<2
1.18840 ++ /* r3 = R[j]*(2-R[j]*d) */
1.18841 ++ mulu.d r4, r3, r5
1.18842 ++ lsl r3, r5, 2
1.18843 ++
1.18844 ++ /* Second approximation */
1.18845 ++ /* Approximating to 32 bits */
1.18846 ++ /* r5 = R[j]*d */
1.18847 ++ mulu.d r4, r3, r9
1.18848 ++ /* r5 = 2-R[j]*d */
1.18849 ++ sub r5, r12, r5<<2
1.18850 ++ /* r3 = R[j]*(2-R[j]*d) */
1.18851 ++ mulu.d r4, r3, r5
1.18852 ++ lsl r3, r5, 2
1.18853 ++
1.18854 ++ /* Third approximation */
1.18855 ++ /* Approximating to 32 bits */
1.18856 ++ /* r5 = R[j]*d */
1.18857 ++ mulu.d r4, r3, r9
1.18858 ++ /* r5 = 2-R[j]*d */
1.18859 ++ sub r5, r12, r5<<2
1.18860 ++ /* r3 = R[j]*(2-R[j]*d) */
1.18861 ++ mulu.d r4, r3, r5
1.18862 ++ lsl r3, r5, 2
1.18863 ++
1.18864 ++ /* Fourth approximation */
1.18865 ++ /* Approximating to 64 bits */
1.18866 ++ /* r5,r4 = R[j]*d */
1.18867 ++ mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
1.18868 ++ lsl r5, 2
1.18869 ++ or r5, r5, r4>>30
1.18870 ++ lsl r4, 2
1.18871 ++ /* r5,r4 = 2-R[j]*d */
1.18872 ++ neg r4
1.18873 ++ sbc r5, r12, r5
1.18874 ++ /* r3,r2 = R[j]*(2-R[j]*d) */
1.18875 ++ mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
1.18876 ++ lsl r3, r5, 2
1.18877 ++ or r3, r3, r4>>30
1.18878 ++ lsl r2, r4, 2
1.18879 ++
1.18880 ++
1.18881 ++ /* Fifth approximation */
1.18882 ++ /* Approximating to 64 bits */
1.18883 ++ /* r5,r4 = R[j]*d */
1.18884 ++ mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
1.18885 ++ lsl r5, 2
1.18886 ++ or r5, r5, r4>>30
1.18887 ++ lsl r4, 2
1.18888 ++ /* r5,r4 = 2-R[j]*d */
1.18889 ++ neg r4
1.18890 ++ sbc r5, r12, r5
1.18891 ++ /* r3,r2 = R[j]*(2-R[j]*d) */
1.18892 ++ mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
1.18893 ++ lsl r3, r5, 2
1.18894 ++ or r3, r3, r4>>30
1.18895 ++ lsl r2, r4, 2
1.18896 ++
1.18897 ++
1.18898 ++ /* Multiply with dividend to get quotient */
1.18899 ++ mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/
1.18900 ++
1.18901 ++
1.18902 ++ /* To increase speed, this result is not corrected before final rounding.*/
1.18903 ++ /* This may give a difference to IEEE compliant code of 1 ULP.*/
1.18904 ++
1.18905 ++
1.18906 ++ /* Adjust exponent and mantissa */
1.18907 ++ /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/
1.18908 ++ /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
1.18909 ++ /* In the first case, shift one pos to left.*/
1.18910 ++ bld r3, 31-3
1.18911 ++ breq 0f
1.18912 ++ lsl r2, 1
1.18913 ++ rol r3
1.18914 ++ sub r7, 1
1.18915 ++#if defined(L_avr32_f64_div)
1.18916 ++ /* We must scale down the dividend to 5.59 format. */
1.18917 ++ lsr r10, 3
1.18918 ++ or r10, r10, r11 << 29
1.18919 ++ lsr r11, 3
1.18920 ++ rjmp 1f
1.18921 ++#endif
1.18922 ++0:
1.18923 ++#if defined(L_avr32_f64_div)
1.18924 ++ /* We must scale down the dividend to 6.58 format. */
1.18925 ++ lsr r10, 4
1.18926 ++ or r10, r10, r11 << 28
1.18927 ++ lsr r11, 4
1.18928 ++1:
1.18929 ++#endif
1.18930 ++ cp r7, 0
1.18931 ++ brle __avr32_f64_div_res_subnormal /* Result was subnormal. */
1.18932 ++
1.18933 ++
1.18934 ++#if defined(L_avr32_f64_div)
1.18935 ++ /* In order to round correctly we calculate the remainder:
1.18936 ++ Remainder = dividend[11:r10] - divisor[r9:r8]*quotient[r3:r2]
1.18937 ++ for the case when the quotient is halfway between the round-up
1.18938 ++ value and the round down value. If the remainder then is negative
1.18939 ++	 it means that the quotient was too big and that it should not be
1.18940 ++	 rounded up, if the remainder is positive the quotient was too small
1.18941 ++ and we need to round up. If the remainder is zero it means that the
1.18942 ++ quotient is exact but since we need to remove the guard bit we should
1.18943 ++ round to even. */
1.18944 ++
1.18945 ++ /* Truncate and add guard bit. */
1.18946 ++ andl r2, 0xff00
1.18947 ++ orl r2, 0x0080
1.18948 ++
1.18949 ++
1.18950 ++ /* Now do the multiplication. The quotient has the format 4.60
1.18951 ++ while the divisor has the format 2.62 which gives a result
1.18952 ++ of 6.58 */
1.18953 ++ mulu.d r0, r3, r8
1.18954 ++ macu.d r0, r2, r9
1.18955 ++ mulu.d r4, r2, r8
1.18956 ++ mulu.d r8, r3, r9
1.18957 ++ add r5, r0
1.18958 ++ adc r8, r8, r1
1.18959 ++ acr r9
1.18960 ++
1.18961 ++
1.18962 ++ /* Check if remainder is positive, negative or equal. */
1.18963 ++	bfextu	r12, r2, 8, 1  /* Get parity bit into bit 0 of r12 */
1.18964 ++ cp r4, 0
1.18965 ++ cpc r5
1.18966 ++__avr32_f64_div_round_subnormal:
1.18967 ++ cpc r8, r10
1.18968 ++ cpc r9, r11
1.18969 ++ srlo r6 /* Remainder positive: we need to round up.*/
1.18970 ++ moveq r6, r12 /* Remainder zero: round up if mantissa odd. */
1.18971 ++#else
1.18972 ++ bfextu r6, r2, 7, 1 /* Get guard bit */
1.18973 ++#endif
1.18974 ++ /* Final packing, scale down mantissa. */
1.18975 ++ lsr r10, r2, 8
1.18976 ++ or r10, r10, r3<<24
1.18977 ++ lsr r11, r3, 8
1.18978 ++ /* Insert exponent and sign bit*/
1.18979 ++ bfins r11, r7, 20, 11
1.18980 ++ bld lr, 31
1.18981 ++ bst r11, 31
1.18982 ++
1.18983 ++ /* Final rounding */
1.18984 ++ add r10, r6
1.18985 ++ acr r11
1.18986 ++
1.18987 ++ /* Return result in [r11,r10] */
1.18988 ++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
1.18989 ++
1.18990 ++
1.18991 ++2:
1.18992 ++ /* Op1 is NaN or inf */
1.18993 ++ andh r11, 0x000f /* Extract mantissa */
1.18994 ++ or r11, r10
1.18995 ++ brne 16f /* Return NaN if op1 is NaN */
1.18996 ++ /* Op1 is inf check op2 */
1.18997 ++ lsr r6, r9, 20 /* Extract exponent */
1.18998 ++ cbr r6, 8 /* Clear sign bit */
1.18999 ++ cp r6, 0x7ff
1.19000 ++ brne 17f /* Inf/number gives inf, return inf */
1.19001 ++ rjmp 16f /* The rest gives NaN*/
1.19002 ++
1.19003 ++3:
1.19004 ++ /* Op1 is a valid number. Op 2 is NaN or inf */
1.19005 ++ andh r9, 0x000f /* Extract mantissa */
1.19006 ++ or r9, r8
1.19007 ++ brne 16f /* Return NaN if op2 is NaN */
1.19008 ++ rjmp 15f /* Op2 was inf, return zero*/
1.19009 ++
1.19010 ++11: /* Op1 was denormal. Fix it. */
1.19011 ++ lsl r11, 3
1.19012 ++ or r11, r11, r10 >> 29
1.19013 ++ lsl r10, 3
1.19014 ++ /* Check if op1 is zero. */
1.19015 ++ or r4, r10, r11
1.19016 ++ breq __avr32_f64_div_op1_zero
1.19017 ++ normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
1.19018 ++ lsr r10, 2
1.19019 ++ or r10, r10, r11 << 30
1.19020 ++ lsr r11, 2
1.19021 ++ rjmp 22b
1.19022 ++
1.19023 ++
1.19024 ++13: /* Op2 was denormal. Fix it */
1.19025 ++ lsl r9, 3
1.19026 ++ or r9, r9, r8 >> 29
1.19027 ++ lsl r8, 3
1.19028 ++ /* Check if op2 is zero. */
1.19029 ++ or r4, r9, r8
1.19030 ++ breq 17f /* Divisor is zero -> return Inf */
1.19031 ++ normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
1.19032 ++ lsr r8, 2
1.19033 ++ or r8, r8, r9 << 30
1.19034 ++ lsr r9, 2
1.19035 ++ rjmp 23b
1.19036 ++
1.19037 ++
1.19038 ++__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */
1.19039 ++#if defined(L_avr32_f64_div)
1.19040 ++ /* Check how much we must scale down the mantissa. */
1.19041 ++ neg r7
1.19042 ++	sub	r7, -1	/* We no longer have an implicit bit. */
1.19043 ++ satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
1.19044 ++ cp.w r7, 32
1.19045 ++ brge 0f
1.19046 ++ /* Shift amount <32 */
1.19047 ++ /* Scale down quotient */
1.19048 ++ rsub r6, r7, 32
1.19049 ++ lsr r2, r2, r7
1.19050 ++ lsl r12, r3, r6
1.19051 ++ or r2, r12
1.19052 ++ lsr r3, r3, r7
1.19053 ++ /* Scale down the dividend to match the scaling of the quotient. */
1.19054 ++ lsl r1, r10, r6
1.19055 ++ lsr r10, r10, r7
1.19056 ++ lsl r12, r11, r6
1.19057 ++ or r10, r12
1.19058 ++ lsr r11, r11, r7
1.19059 ++ mov r0, 0
1.19060 ++ rjmp 1f
1.19061 ++0:
1.19062 ++ /* Shift amount >=32 */
1.19063 ++ rsub r6, r7, 32
1.19064 ++ moveq r0, 0
1.19065 ++ moveq r12, 0
1.19066 ++ breq 0f
1.19067 ++ lsl r0, r10, r6
1.19068 ++ lsl r12, r11, r6
1.19069 ++0:
1.19070 ++ lsr r2, r3, r7
1.19071 ++ mov r3, 0
1.19072 ++ /* Scale down the dividend to match the scaling of the quotient. */
1.19073 ++ lsr r1, r10, r7
1.19074 ++ or r1, r12
1.19075 ++ lsr r10, r11, r7
1.19076 ++ mov r11, 0
1.19077 ++1:
1.19078 ++ /* Start performing the same rounding as done for normal numbers
1.19079 ++ but this time we have scaled the quotient and dividend and hence
1.19080 ++ need a little different comparison. */
1.19081 ++ /* Truncate and add guard bit. */
1.19082 ++ andl r2, 0xff00
1.19083 ++ orl r2, 0x0080
1.19084 ++
1.19085 ++ /* Now do the multiplication. */
1.19086 ++ mulu.d r6, r3, r8
1.19087 ++ macu.d r6, r2, r9
1.19088 ++ mulu.d r4, r2, r8
1.19089 ++ mulu.d r8, r3, r9
1.19090 ++ add r5, r6
1.19091 ++ adc r8, r8, r7
1.19092 ++ acr r9
1.19093 ++
1.19094 ++ /* Set exponent to 0 */
1.19095 ++ mov r7, 0
1.19096 ++
1.19097 ++ /* Check if remainder is positive, negative or equal. */
1.19098 ++	bfextu	r12, r2, 8, 1  /* Get parity bit into bit 0 of r12 */
1.19099 ++ cp r4, r0
1.19100 ++ cpc r5, r1
1.19101 ++ /* Now the rest of the rounding is the same as for normals. */
1.19102 ++ rjmp __avr32_f64_div_round_subnormal
1.19103 ++
1.19104 ++#endif
1.19105 ++15:
1.19106 ++ /* Flush to zero for the fast version. */
1.19107 ++ mov r11, lr /*Get correct sign*/
1.19108 ++ andh r11, 0x8000, COH
1.19109 ++ mov r10, 0
1.19110 ++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
1.19111 ++
1.19112 ++16: /* Return NaN. */
1.19113 ++ mov r11, -1
1.19114 ++ mov r10, -1
1.19115 ++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
1.19116 ++
1.19117 ++17: /* Return INF. */
1.19118 ++ mov r11, lr /*Get correct sign*/
1.19119 ++ andh r11, 0x8000, COH
1.19120 ++ orh r11, 0x7ff0
1.19121 ++ mov r10, 0
1.19122 ++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
1.19123 ++
1.19124 ++__avr32_f64_div_op1_zero:
1.19125 ++ or r5, r8, r9 << 1
1.19126 ++ breq 16b /* 0.0/0.0 -> NaN */
1.19127 ++ bfextu r4, r9, 20, 11
1.19128 ++ cp r4, 0x7ff
1.19129 ++ brne 15b /* Return zero */
1.19130 ++ /* Check if divisor is Inf or NaN */
1.19131 ++ or r5, r8, r9 << 12
1.19132 ++ breq 15b /* Divisor is inf -> return zero */
1.19133 ++ rjmp 16b /* Return NaN */
1.19134 ++
1.19135 ++
1.19136 ++
1.19137 ++
1.19138 ++#endif
1.19139 ++
1.19140 ++#if defined(L_avr32_f32_addsub) || defined(L_avr32_f32_addsub_fast)
1.19141 ++
1.19142 ++ .align 2
1.19143 ++__avr32_f32_sub_from_add:
1.19144 ++ /* Switch sign on op2 */
1.19145 ++ eorh r11, 0x8000
1.19146 ++
1.19147 ++#if defined(L_avr32_f32_addsub_fast)
1.19148 ++ .global __avr32_f32_sub_fast
1.19149 ++ .type __avr32_f32_sub_fast,@function
1.19150 ++__avr32_f32_sub_fast:
1.19151 ++#else
1.19152 ++ .global __avr32_f32_sub
1.19153 ++ .type __avr32_f32_sub,@function
1.19154 ++__avr32_f32_sub:
1.19155 ++#endif
1.19156 ++
1.19157 ++ /* Check signs */
1.19158 ++ eor r8, r11, r12
1.19159 ++ /* Different signs, use subtraction. */
1.19160 ++ brmi __avr32_f32_add_from_sub
1.19161 ++
1.19162 ++ /* Get sign of op1 */
1.19163 ++ mov r8, r12
1.19164 ++ andh r12, 0x8000, COH
1.19165 ++
1.19166 ++ /* Remove sign from operands */
1.19167 ++ cbr r11, 31
1.19168 ++#if defined(L_avr32_f32_addsub_fast)
1.19169 ++ reteq r8 /* If op2 is zero return op1 */
1.19170 ++#endif
1.19171 ++ cbr r8, 31
1.19172 ++
1.19173 ++ /* Put the number with the largest exponent in r10
1.19174 ++ and the number with the smallest exponent in r9 */
1.19175 ++ max r10, r8, r11
1.19176 ++ min r9, r8, r11
1.19177 ++ cp r10, r8 /*If largest operand (in R10) is not equal to op1*/
1.19178 ++ subne r12, 1 /* Subtract 1 from sign, which will invert MSB of r12*/
1.19179 ++ andh r12, 0x8000, COH /*Mask all but MSB*/
1.19180 ++
1.19181 ++ /* Unpack exponent and mantissa of op1 */
1.19182 ++ lsl r8, r10, 8
1.19183 ++ sbr r8, 31 /* Set implicit bit. */
1.19184 ++ lsr r10, 23
1.19185 ++
1.19186 ++ /* op1 is NaN or Inf. */
1.19187 ++ cp.w r10, 0xff
1.19188 ++ breq __avr32_f32_sub_op1_nan_or_inf
1.19189 ++
1.19190 ++ /* Unpack exponent and mantissa of op2 */
1.19191 ++ lsl r11, r9, 8
1.19192 ++ sbr r11, 31 /* Set implicit bit. */
1.19193 ++ lsr r9, 23
1.19194 ++
1.19195 ++#if defined(L_avr32_f32_addsub)
1.19196 ++ /* Keep sticky bit for correct IEEE rounding */
1.19197 ++ st.w --sp, r12
1.19198 ++
1.19199 ++ /* op2 is either zero or subnormal. */
1.19200 ++ breq __avr32_f32_sub_op2_subnormal
1.19201 ++0:
1.19202 ++ /* Get shift amount to scale mantissa of op2. */
1.19203 ++ sub r12, r10, r9
1.19204 ++
1.19205 ++ breq __avr32_f32_sub_shift_done
1.19206 ++
1.19207 ++ /* Saturate the shift amount to 31. If the amount
1.19208 ++ is any larger op2 is insignificant. */
1.19209 ++ satu r12 >> 0, 5
1.19210 ++
1.19211 ++ /* Put the remaining bits into r9.*/
1.19212 ++ rsub r9, r12, 32
1.19213 ++ lsl r9, r11, r9
1.19214 ++
1.19215 ++ /* If the remaining bits are non-zero then we must subtract one
1.19216 ++	   more from op1. */
1.19217 ++ subne r8, 1
1.19218 ++ srne r9 /* LSB of r9 represents sticky bits. */
1.19219 ++
1.19220 ++ /* Shift mantissa of op2 to same decimal point as the mantissa
1.19221 ++ of op1. */
1.19222 ++ lsr r11, r11, r12
1.19223 ++
1.19224 ++
1.19225 ++__avr32_f32_sub_shift_done:
1.19226 ++ /* Now subtract the mantissas. */
1.19227 ++ sub r8, r11
1.19228 ++
1.19229 ++ ld.w r12, sp++
1.19230 ++
1.19231 ++ /* Normalize resulting mantissa. */
1.19232 ++ clz r11, r8
1.19233 ++
1.19234 ++ retcs 0
1.19235 ++ lsl r8, r8, r11
1.19236 ++ sub r10, r11
1.19237 ++ brle __avr32_f32_sub_subnormal_result
1.19238 ++
1.19239 ++ /* Insert the bits we will remove from the mantissa into r9[31:24] */
1.19240 ++ or r9, r9, r8 << 24
1.19241 ++#else
1.19242 ++ /* Ignore sticky bit to simplify and speed up rounding */
1.19243 ++ /* op2 is either zero or subnormal. */
1.19244 ++ breq __avr32_f32_sub_op2_subnormal
1.19245 ++0:
1.19246 ++ /* Get shift amount to scale mantissa of op2. */
1.19247 ++ rsub r9, r10
1.19248 ++
1.19249 ++ /* Saturate the shift amount to 31. If the amount
1.19250 ++ is any larger op2 is insignificant. */
1.19251 ++ satu r9 >> 0, 5
1.19252 ++
1.19253 ++ /* Shift mantissa of op2 to same decimal point as the mantissa
1.19254 ++ of op1. */
1.19255 ++ lsr r11, r11, r9
1.19256 ++
1.19257 ++ /* Now subtract the mantissas. */
1.19258 ++ sub r8, r11
1.19259 ++
1.19260 ++ /* Normalize resulting mantissa. */
1.19261 ++ clz r9, r8
1.19262 ++ retcs 0
1.19263 ++ lsl r8, r8, r9
1.19264 ++ sub r10, r9
1.19265 ++ brle __avr32_f32_sub_subnormal_result
1.19266 ++#endif
1.19267 ++
1.19268 ++ /* Pack result. */
1.19269 ++ or r12, r12, r8 >> 8
1.19270 ++ bfins r12, r10, 23, 8
1.19271 ++
1.19272 ++ /* Round */
1.19273 ++__avr32_f32_sub_round:
1.19274 ++#if defined(L_avr32_f32_addsub)
1.19275 ++ mov_imm r10, 0x80000000
1.19276 ++ bld r12, 0
1.19277 ++ subne r10, -1
1.19278 ++ cp.w r9, r10
1.19279 ++ subhs r12, -1
1.19280 ++#else
1.19281 ++ bld r8, 7
1.19282 ++ acr r12
1.19283 ++#endif
1.19284 ++
1.19285 ++ ret r12
1.19286 ++
1.19287 ++
1.19288 ++__avr32_f32_sub_op2_subnormal:
1.19289 ++ /* Fix implicit bit and adjust exponent of subnormals. */
1.19290 ++ cbr r11, 31
1.19291 ++ /* Set exponent to 1 if we do not have a zero. */
1.19292 ++ movne r9,1
1.19293 ++
1.19294 ++ /* Check if op1 is also subnormal. */
1.19295 ++ cp.w r10, 0
1.19296 ++ brne 0b
1.19297 ++
1.19298 ++ cbr r8, 31
1.19299 ++ /* If op1 is not zero set exponent to 1. */
1.19300 ++ movne r10,1
1.19301 ++
1.19302 ++ rjmp 0b
1.19303 ++
1.19304 ++__avr32_f32_sub_op1_nan_or_inf:
1.19305 ++ /* Check if op1 is NaN, if so return NaN */
1.19306 ++ lsl r11, r8, 1
1.19307 ++ retne -1
1.19308 ++
1.19309 ++ /* op1 is Inf. */
1.19310 ++ bfins r12, r10, 23, 8 /* Generate Inf in r12 */
1.19311 ++
1.19312 ++ /* Check if op2 is Inf. or NaN */
1.19313 ++ lsr r11, r9, 23
1.19314 ++ cp.w r11, 0xff
1.19315 ++ retne r12 /* op2 not Inf or NaN, return op1 */
1.19316 ++
1.19317 ++ ret -1 /* op2 Inf or NaN, return NaN */
1.19318 ++
1.19319 ++__avr32_f32_sub_subnormal_result:
1.19320 ++ /* Check if the number is so small that
1.19321 ++ it will be represented with zero. */
1.19322 ++ rsub r10, r10, 9
1.19323 ++ rsub r11, r10, 32
1.19324 ++ retcs 0
1.19325 ++
1.19326 ++ /* Shift the mantissa into the correct position.*/
1.19327 ++ lsr r10, r8, r10
1.19328 ++ /* Add sign bit. */
1.19329 ++ or r12, r10
1.19330 ++
1.19331 ++ /* Put the shifted out bits in the most significant part
1.19332 ++ of r8. */
1.19333 ++ lsl r8, r8, r11
1.19334 ++
1.19335 ++#if defined(L_avr32_f32_addsub)
1.19336 ++ /* Add all the remainder bits used for rounding into r9 */
1.19337 ++ or r9, r8
1.19338 ++#else
1.19339 ++ lsr r8, 24
1.19340 ++#endif
1.19341 ++ rjmp __avr32_f32_sub_round
1.19342 ++
1.19343 ++
1.19344 ++ .align 2
1.19345 ++
1.19346 ++__avr32_f32_add_from_sub:
1.19347 ++ /* Switch sign on op2 */
1.19348 ++ eorh r11, 0x8000
1.19349 ++
1.19350 ++#if defined(L_avr32_f32_addsub_fast)
1.19351 ++ .global __avr32_f32_add_fast
1.19352 ++ .type __avr32_f32_add_fast,@function
1.19353 ++__avr32_f32_add_fast:
1.19354 ++#else
1.19355 ++ .global __avr32_f32_add
1.19356 ++ .type __avr32_f32_add,@function
1.19357 ++__avr32_f32_add:
1.19358 ++#endif
1.19359 ++
1.19360 ++ /* Check signs */
1.19361 ++ eor r8, r11, r12
1.19362 ++ /* Different signs, use subtraction. */
1.19363 ++ brmi __avr32_f32_sub_from_add
1.19364 ++
1.19365 ++ /* Get sign of op1 */
1.19366 ++ mov r8, r12
1.19367 ++ andh r12, 0x8000, COH
1.19368 ++
1.19369 ++ /* Remove sign from operands */
1.19370 ++ cbr r11, 31
1.19371 ++#if defined(L_avr32_f32_addsub_fast)
1.19372 ++ reteq r8 /* If op2 is zero return op1 */
1.19373 ++#endif
1.19374 ++ cbr r8, 31
1.19375 ++
1.19376 ++ /* Put the number with the largest exponent in r10
1.19377 ++ and the number with the smallest exponent in r9 */
1.19378 ++ max r10, r8, r11
1.19379 ++ min r9, r8, r11
1.19380 ++
1.19381 ++ /* Unpack exponent and mantissa of op1 */
1.19382 ++ lsl r8, r10, 8
1.19383 ++ sbr r8, 31 /* Set implicit bit. */
1.19384 ++ lsr r10, 23
1.19385 ++
1.19386 ++ /* op1 is NaN or Inf. */
1.19387 ++ cp.w r10, 0xff
1.19388 ++ breq __avr32_f32_add_op1_nan_or_inf
1.19389 ++
1.19390 ++ /* Unpack exponent and mantissa of op2 */
1.19391 ++ lsl r11, r9, 8
1.19392 ++ sbr r11, 31 /* Set implicit bit. */
1.19393 ++ lsr r9, 23
1.19394 ++
1.19395 ++#if defined(L_avr32_f32_addsub)
1.19396 ++ /* op2 is either zero or subnormal. */
1.19397 ++ breq __avr32_f32_add_op2_subnormal
1.19398 ++0:
1.19399 ++ /* Keep sticky bit for correct IEEE rounding */
1.19400 ++ st.w --sp, r12
1.19401 ++
1.19402 ++ /* Get shift amount to scale mantissa of op2. */
1.19403 ++ rsub r9, r10
1.19404 ++
1.19405 ++ /* Saturate the shift amount to 31. If the amount
1.19406 ++ is any larger op2 is insignificant. */
1.19407 ++ satu r9 >> 0, 5
1.19408 ++
1.19409 ++ /* Shift mantissa of op2 to same decimal point as the mantissa
1.19410 ++ of op1. */
1.19411 ++ lsr r12, r11, r9
1.19412 ++
1.19413 ++	/* Put the remaining bits into r11[23:..].*/
1.19414 ++ rsub r9, r9, (32-8)
1.19415 ++ lsl r11, r11, r9
1.19416 ++ /* Insert the bits we will remove from the mantissa into r11[31:24] */
1.19417 ++ bfins r11, r12, 24, 8
1.19418 ++
1.19419 ++ /* Now add the mantissas. */
1.19420 ++ add r8, r12
1.19421 ++
1.19422 ++ ld.w r12, sp++
1.19423 ++#else
1.19424 ++ /* Ignore sticky bit to simplify and speed up rounding */
1.19425 ++ /* op2 is either zero or subnormal. */
1.19426 ++ breq __avr32_f32_add_op2_subnormal
1.19427 ++0:
1.19428 ++ /* Get shift amount to scale mantissa of op2. */
1.19429 ++ rsub r9, r10
1.19430 ++
1.19431 ++ /* Saturate the shift amount to 31. If the amount
1.19432 ++ is any larger op2 is insignificant. */
1.19433 ++ satu r9 >> 0, 5
1.19434 ++
1.19435 ++ /* Shift mantissa of op2 to same decimal point as the mantissa
1.19436 ++ of op1. */
1.19437 ++ lsr r11, r11, r9
1.19438 ++
1.19439 ++ /* Now add the mantissas. */
1.19440 ++ add r8, r11
1.19441 ++
1.19442 ++#endif
1.19443 ++ /* Check if we overflowed. */
1.19444 ++ brcs __avr32_f32_add_res_of
1.19445 ++1:
1.19446 ++ /* Pack result. */
1.19447 ++ or r12, r12, r8 >> 8
1.19448 ++ bfins r12, r10, 23, 8
1.19449 ++
1.19450 ++ /* Round */
1.19451 ++#if defined(L_avr32_f32_addsub)
1.19452 ++ mov_imm r10, 0x80000000
1.19453 ++ bld r12, 0
1.19454 ++ subne r10, -1
1.19455 ++ cp.w r11, r10
1.19456 ++ subhs r12, -1
1.19457 ++#else
1.19458 ++ bld r8, 7
1.19459 ++ acr r12
1.19460 ++#endif
1.19461 ++
1.19462 ++ ret r12
1.19463 ++
1.19464 ++__avr32_f32_add_op2_subnormal:
1.19465 ++ /* Fix implicit bit and adjust exponent of subnormals. */
1.19466 ++ cbr r11, 31
1.19467 ++ /* Set exponent to 1 if we do not have a zero. */
1.19468 ++ movne r9,1
1.19469 ++
1.19470 ++ /* Check if op1 is also subnormal. */
1.19471 ++ cp.w r10, 0
1.19472 ++ brne 0b
1.19473 ++ /* Both operands subnormal, just add the mantissas and
1.19474 ++ pack. If the addition of the subnormal numbers results
1.19475 ++ in a normal number then the exponent will automatically
1.19476 ++ be set to 1 by the addition. */
1.19477 ++ cbr r8, 31
1.19478 ++ add r11, r8
1.19479 ++ or r12, r12, r11 >> 8
1.19480 ++ ret r12
1.19481 ++
1.19482 ++__avr32_f32_add_op1_nan_or_inf:
1.19483 ++ /* Check if op1 is NaN, if so return NaN */
1.19484 ++ lsl r11, r8, 1
1.19485 ++ retne -1
1.19486 ++
1.19487 ++ /* op1 is Inf. */
1.19488 ++ bfins r12, r10, 23, 8 /* Generate Inf in r12 */
1.19489 ++
1.19490 ++ /* Check if op2 is Inf. or NaN */
1.19491 ++ lsr r11, r9, 23
1.19492 ++ cp.w r11, 0xff
1.19493 ++ retne r12 /* op2 not Inf or NaN, return op1 */
1.19494 ++
1.19495 ++ lsl r9, 9
1.19496 ++ reteq r12 /* op2 Inf return op1 */
1.19497 ++ ret -1 /* op2 is NaN, return NaN */
1.19498 ++
1.19499 ++__avr32_f32_add_res_of:
1.19500 ++ /* We overflowed. Increase exponent and shift mantissa.*/
1.19501 ++ lsr r8, 1
1.19502 ++ sub r10, -1
1.19503 ++
1.19504 ++ /* Clear mantissa to set result to Inf if the exponent is 255. */
1.19505 ++ cp.w r10, 255
1.19506 ++ moveq r8, 0
1.19507 ++ moveq r11, 0
1.19508 ++ rjmp 1b
1.19509 ++
1.19510 ++
1.19511 ++#endif
1.19512 ++
1.19513 ++
1.19514 ++#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast)
1.19515 ++ .align 2
1.19516 ++
1.19517 ++#if defined(L_avr32_f32_div_fast)
1.19518 ++ .global __avr32_f32_div_fast
1.19519 ++ .type __avr32_f32_div_fast,@function
1.19520 ++__avr32_f32_div_fast:
1.19521 ++#else
1.19522 ++ .global __avr32_f32_div
1.19523 ++ .type __avr32_f32_div,@function
1.19524 ++__avr32_f32_div:
1.19525 ++#endif
1.19526 ++
1.19527 ++ eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
1.19528 ++
1.19529 ++ /* Unpack */
1.19530 ++ lsl r12,1
1.19531 ++ reteq 0 /* Return zero if op1 is zero */
1.19532 ++ lsl r11,1
1.19533 ++ breq 4f /* Check op2 for zero */
1.19534 ++
1.19535 ++ /* Unpack op1*/
1.19536 ++ /* exp: r9 */
1.19537 ++ /* sf: r12 */
1.19538 ++ lsr r9, r12, 24
1.19539 ++ breq 11f /*If number is subnormal*/
1.19540 ++ cp r9, 0xff
1.19541 ++ brhs 2f /* Check op1 for NaN or Inf */
1.19542 ++ lsl r12, 7
1.19543 ++ sbr r12, 31 /*Implicit bit*/
1.19544 ++12:
1.19545 ++
1.19546 ++ /* Unpack op2*/
1.19547 ++ /* exp: r10 */
1.19548 ++ /* sf: r11 */
1.19549 ++ lsr r10, r11, 24
1.19550 ++ breq 13f /*If number is subnormal*/
1.19551 ++ cp r10, 0xff
1.19552 ++ brhs 3f /* Check op2 for NaN or Inf */
1.19553 ++
1.19554 ++ lsl r11,7
1.19555 ++ sbr r11, 31 /*Implicit bit*/
1.19556 ++14:
1.19557 ++
1.19558 ++ /* For UC3, store with predecrement is faster than stm */
1.19559 ++ st.w --sp, r5
1.19560 ++ st.d --sp, r6
1.19561 ++
1.19562 ++ /* Calculate new exponent */
1.19563 ++ sub r9, r10
1.19564 ++ sub r9,-127
1.19565 ++
1.19566 ++ /* Divide */
1.19567 ++ /* Approximating 1/d with the following recurrence: */
1.19568 ++ /* R[j+1] = R[j]*(2-R[j]*d) */
1.19569 ++ /* Using 2.30 format */
1.19570 ++ /* TWO: r10 */
1.19571 ++ /* d: r5 */
1.19572 ++ /* Multiply result : r6, r7 */
1.19573 ++ /* Initial guess : r11 */
1.19574 ++ /* New approximations : r11 */
1.19575 ++ /* Dividend : r12 */
1.19576 ++
1.19577 ++ /* Load TWO */
1.19578 ++ mov_imm r10, 0x80000000
1.19579 ++
1.19580 ++ lsr r12, 2 /* Get significand of Op1 in 2.30 format */
1.19581 ++ lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */
1.19582 ++
1.19583 ++ /* Load initial guess, using look-up table */
1.19584 ++ /* Initial guess is of format 01.XY, where XY is constructed as follows: */
1.19585 ++ /* Let d be of following format: 00.1xy....., then XY=~xy */
1.19586 ++ /* For d=00.100 = 0,5 -> initial guess=01.11 = 1,75 */
1.19587 ++	/* For d=00.101 = 0,625 -> initial guess=01.10 = 1,5 */
1.19588 ++	/* For d=00.110 = 0,75 -> initial guess=01.01 = 1,25 */
1.19589 ++	/* For d=00.111 = 0,875 -> initial guess=01.00 = 1,0 */
1.19590 ++
1.19591 ++ lsr r11, r10, 1
1.19592 ++ bfextu r6, r5, 27, 2
1.19593 ++ com r6
1.19594 ++ bfins r11, r6, 28, 2
1.19595 ++
1.19596 ++ /* First approximation */
1.19597 ++ /* r7 = R[j]*d */
1.19598 ++ mulu.d r6, r11, r5
1.19599 ++ /* r7 = 2-R[j]*d */
1.19600 ++ sub r7, r10, r7<<2
1.19601 ++ /* r11 = R[j]*(2-R[j]*d) */
1.19602 ++ mulu.d r6, r11, r7
1.19603 ++ lsl r11, r7, 2
1.19604 ++
1.19605 ++ /* Second approximation */
1.19606 ++ /* r7 = R[j]*d */
1.19607 ++ mulu.d r6, r11, r5
1.19608 ++ /* r7 = 2-R[j]*d */
1.19609 ++ sub r7, r10, r7<<2
1.19610 ++ /* r11 = R[j]*(2-R[j]*d) */
1.19611 ++ mulu.d r6, r11, r7
1.19612 ++ lsl r11, r7, 2
1.19613 ++
1.19614 ++ /* Third approximation */
1.19615 ++ /* r7 = R[j]*d */
1.19616 ++ mulu.d r6, r11, r5
1.19617 ++ /* r7 = 2-R[j]*d */
1.19618 ++ sub r7, r10, r7<<2
1.19619 ++ /* r11 = R[j]*(2-R[j]*d) */
1.19620 ++ mulu.d r6, r11, r7
1.19621 ++ lsl r11, r7, 2
1.19622 ++
1.19623 ++ /* Fourth approximation */
1.19624 ++ /* r7 = R[j]*d */
1.19625 ++ mulu.d r6, r11, r5
1.19626 ++ /* r7 = 2-R[j]*d */
1.19627 ++ sub r7, r10, r7<<2
1.19628 ++ /* r11 = R[j]*(2-R[j]*d) */
1.19629 ++ mulu.d r6, r11, r7
1.19630 ++ lsl r11, r7, 2
1.19631 ++
1.19632 ++
1.19633 ++ /* Multiply with dividend to get quotient, r7 = sf(op1)/sf(op2) */
1.19634 ++ mulu.d r6, r11, r12
1.19635 ++
1.19636 ++ /* Shift by 3 to get result in 1.31 format, as required by the exponent. */
1.19637 ++ /* Note that 1.31 format is already used by the exponent in r9, since */
1.19638 ++ /* a bias of 127 was added to the result exponent, even though the implicit */
1.19639 ++ /* bit was inserted. This gives the exponent an additional bias of 1, which */
1.19640 ++ /* supports 1.31 format. */
1.19641 ++ //lsl r10, r7, 3
1.19642 ++
1.19643 ++ /* Adjust exponent and mantissa in case the result is of format
1.19644 ++ 0000.1xxx to 0001.xxx*/
1.19645 ++#if defined(L_avr32_f32_div)
1.19646 ++ lsr r12, 4 /* Scale dividend to 6.26 format to match the
1.19647 ++ result of the multiplication of the divisor and
1.19648 ++ quotient to get the remainder. */
1.19649 ++#endif
1.19650 ++ bld r7, 31-3
1.19651 ++ breq 0f
1.19652 ++ lsl r7, 1
1.19653 ++ sub r9, 1
1.19654 ++#if defined(L_avr32_f32_div)
1.19655 ++ lsl r12, 1 /* Scale dividend to 5.27 format to match the
1.19656 ++ result of the multiplication of the divisor and
1.19657 ++ quotient to get the remainder. */
1.19658 ++#endif
1.19659 ++0:
1.19660 ++ cp r9, 0
1.19661 ++ brle __avr32_f32_div_res_subnormal /* Result was subnormal. */
1.19662 ++
1.19663 ++
1.19664 ++#if defined(L_avr32_f32_div)
1.19665 ++ /* In order to round correctly we calculate the remainder:
1.19666 ++ Remainder = dividend[r12] - divisor[r5]*quotient[r7]
1.19667 ++ for the case when the quotient is halfway between the round-up
1.19668 ++ value and the round down value. If the remainder then is negative
1.19669 ++	   it means that the quotient was too big and that it should not be
1.19670 ++	   rounded up, if the remainder is positive the quotient was too small
1.19671 ++ and we need to round up. If the remainder is zero it means that the
1.19672 ++ quotient is exact but since we need to remove the guard bit we should
1.19673 ++ round to even. */
1.19674 ++ andl r7, 0xffe0
1.19675 ++ orl r7, 0x0010
1.19676 ++
1.19677 ++ /* Now do the multiplication. The quotient has the format 4.28
1.19678 ++ while the divisor has the format 2.30 which gives a result
1.19679 ++ of 6.26 */
1.19680 ++ mulu.d r10, r5, r7
1.19681 ++
1.19682 ++ /* Check if remainder is positive, negative or equal. */
1.19683 ++ bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
1.19684 ++ cp r10, 0
1.19685 ++__avr32_f32_div_round_subnormal:
1.19686 ++ cpc r11, r12
1.19687 ++ srlo r11 /* Remainder positive: we need to round up.*/
1.19688 ++ moveq r11, r5 /* Remainder zero: round up if mantissa odd. */
1.19689 ++#else
1.19690 ++ bfextu r11, r7, 4, 1 /* Get guard bit */
1.19691 ++#endif
1.19692 ++
1.19693 ++ /* Pack final result*/
1.19694 ++ lsr r12, r7, 5
1.19695 ++ bfins r12, r9, 23, 8
1.19696 ++ /* For UC3, load with postincrement is faster than ldm */
1.19697 ++ ld.d r6, sp++
1.19698 ++ ld.w r5, sp++
1.19699 ++ bld r8, 31
1.19700 ++ bst r12, 31
1.19701 ++ /* Rounding add. */
1.19702 ++ add r12, r11
1.19703 ++ ret r12
1.19704 ++
1.19705 ++__divsf_return_op1:
1.19706 ++ lsl r8, 1
1.19707 ++ ror r12
1.19708 ++ ret r12
1.19709 ++
1.19710 ++
1.19711 ++2:
1.19712 ++ /* Op1 is NaN or inf */
1.19713 ++ retne -1 /* Return NaN if op1 is NaN */
1.19714 ++ /* Op1 is inf check op2 */
1.19715 ++ mov_imm r9, 0xff000000
1.19716 ++ cp r11, r9
1.19717 ++ brlo __divsf_return_op1 /* inf/number gives inf */
1.19718 ++ ret -1 /* The rest gives NaN*/
1.19719 ++3:
1.19720 ++ /* Op2 is NaN or inf */
1.19721 ++ reteq 0 /* Return zero if number/inf*/
1.19722 ++ ret -1 /* Return NaN*/
1.19723 ++4:
1.19724 ++ /* Op2 is zero ? */
1.19725 ++ tst r12,r12
1.19726 ++ reteq -1 /* 0.0/0.0 is NaN */
1.19727 ++ /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/
1.19728 ++ mov_imm r12, 0xff000000
1.19729 ++ rjmp __divsf_return_op1
1.19730 ++
1.19731 ++11: /* Op1 was denormal. Fix it. */
1.19732 ++ lsl r12,7
1.19733 ++ clz r9,r12
1.19734 ++ lsl r12,r12,r9
1.19735 ++ rsub r9,r9,1
1.19736 ++ rjmp 12b
1.19737 ++
1.19738 ++13: /* Op2 was denormal. Fix it. */
1.19739 ++ lsl r11,7
1.19740 ++ clz r10,r11
1.19741 ++ lsl r11,r11,r10
1.19742 ++ rsub r10,r10,1
1.19743 ++ rjmp 14b
1.19744 ++
1.19745 ++
1.19746 ++__avr32_f32_div_res_subnormal: /* Divide result was subnormal */
1.19747 ++#if defined(L_avr32_f32_div)
1.19748 ++ /* Check how much we must scale down the mantissa. */
1.19749 ++ neg r9
1.19750 ++ sub r9, -1 /* We do no longer have an implicit bit. */
1.19751 ++ satu r9 >> 0, 5 /* Saturate shift amount to max 32. */
1.19752 ++ /* Scale down quotient */
1.19753 ++ rsub r10, r9, 32
1.19754 ++ lsr r7, r7, r9
1.19755 ++ /* Scale down the dividend to match the scaling of the quotient. */
1.19756 ++	lsl	r6, r12, r10	/* Make the dividend 64-bit and put the lsw in r6 */
1.19757 ++ lsr r12, r12, r9
1.19758 ++
1.19759 ++ /* Start performing the same rounding as done for normal numbers
1.19760 ++ but this time we have scaled the quotient and dividend and hence
1.19761 ++ need a little different comparison. */
1.19762 ++ andl r7, 0xffe0
1.19763 ++ orl r7, 0x0010
1.19764 ++
1.19765 ++ /* Now do the multiplication. The quotient has the format 4.28
1.19766 ++ while the divisor has the format 2.30 which gives a result
1.19767 ++ of 6.26 */
1.19768 ++ mulu.d r10, r5, r7
1.19769 ++
1.19770 ++ /* Set exponent to 0 */
1.19771 ++ mov r9, 0
1.19772 ++
1.19773 ++ /* Check if remainder is positive, negative or equal. */
1.19774 ++ bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
1.19775 ++ cp r10, r6
1.19776 ++ rjmp __avr32_f32_div_round_subnormal
1.19777 ++
1.19778 ++#else
1.19779 ++ ld.d r6, sp++
1.19780 ++ ld.w r5, sp++
1.19781 ++ /*Flush to zero*/
1.19782 ++ ret 0
1.19783 ++#endif
1.19784 ++#endif
1.19785 ++
1.19786 ++#ifdef L_avr32_f32_mul
1.19787 ++ .global __avr32_f32_mul
1.19788 ++ .type __avr32_f32_mul,@function
1.19789 ++
1.19790 ++
1.19791 ++__avr32_f32_mul:
1.19792 ++ mov r8, r12
1.19793 ++ eor r12, r11 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
1.19794 ++ andh r12, 0x8000, COH
1.19795 ++
1.19796 ++ /* arrange operands so that that op1 >= op2 */
1.19797 ++ cbr r8, 31
1.19798 ++ breq __avr32_f32_mul_op1_zero
1.19799 ++ cbr r11, 31
1.19800 ++
1.19801 ++ /* Put the number with the largest exponent in r10
1.19802 ++ and the number with the smallest exponent in r9 */
1.19803 ++ max r10, r8, r11
1.19804 ++ min r9, r8, r11
1.19805 ++
1.19806 ++ /* Unpack exponent and mantissa of op1 */
1.19807 ++ lsl r8, r10, 8
1.19808 ++ sbr r8, 31 /* Set implicit bit. */
1.19809 ++ lsr r10, 23
1.19810 ++
1.19811 ++ /* op1 is NaN or Inf. */
1.19812 ++ cp.w r10, 0xff
1.19813 ++ breq __avr32_f32_mul_op1_nan_or_inf
1.19814 ++
1.19815 ++ /* Unpack exponent and mantissa of op2 */
1.19816 ++ lsl r11, r9, 8
1.19817 ++ sbr r11, 31 /* Set implicit bit. */
1.19818 ++ lsr r9, 23
1.19819 ++
1.19820 ++ /* op2 is either zero or subnormal. */
1.19821 ++ breq __avr32_f32_mul_op2_subnormal
1.19822 ++0:
1.19823 ++ /* Calculate new exponent */
1.19824 ++ add r9,r10
1.19825 ++
1.19826 ++ /* Do the multiplication */
1.19827 ++ mulu.d r10,r8,r11
1.19828 ++
1.19829 ++ /* We might need to scale up by two if the MSB of the result is
1.19830 ++ zero. */
1.19831 ++ lsl r8, r11, 1
1.19832 ++ movcc r11, r8
1.19833 ++ subcc r9, 1
1.19834 ++
1.19835 ++ /* Put the shifted out bits of the mantissa into r10 */
1.19836 ++ lsr r10, 8
1.19837 ++ bfins r10, r11, 24, 8
1.19838 ++
1.19839 ++ sub r9,(127-1) /* remove extra exponent bias */
1.19840 ++ brle __avr32_f32_mul_res_subnormal
1.19841 ++
1.19842 ++ /* Check for Inf. */
1.19843 ++ cp.w r9, 0xff
1.19844 ++ brge 1f
1.19845 ++
1.19846 ++ /* Pack result. */
1.19847 ++ or r12, r12, r11 >> 8
1.19848 ++ bfins r12, r9, 23, 8
1.19849 ++
1.19850 ++ /* Round */
1.19851 ++__avr32_f32_mul_round:
1.19852 ++ mov_imm r8, 0x80000000
1.19853 ++ bld r12, 0
1.19854 ++ subne r8, -1
1.19855 ++
1.19856 ++ cp.w r10, r8
1.19857 ++ subhs r12, -1
1.19858 ++
1.19859 ++ ret r12
1.19860 ++
1.19861 ++1:
1.19862 ++ /* Return Inf */
1.19863 ++ orh r12, 0x7f80
1.19864 ++ ret r12
1.19865 ++
1.19866 ++__avr32_f32_mul_op2_subnormal:
1.19867 ++ cbr r11, 31
1.19868 ++ clz r9, r11
1.19869 ++ retcs 0 /* op2 is zero. Return 0 */
1.19870 ++ lsl r11, r11, r9
1.19871 ++ rsub r9, r9, 1
1.19872 ++
1.19873 ++ /* Check if op2 is subnormal. */
1.19874 ++ tst r10, r10
1.19875 ++ brne 0b
1.19876 ++
1.19877 ++ /* op2 is subnormal */
1.19878 ++ cbr r8, 31
1.19879 ++ clz r10, r11
1.19880 ++ retcs 0 /* op1 is zero. Return 0 */
1.19881 ++ lsl r8, r8, r10
1.19882 ++ rsub r10, r10, 1
1.19883 ++
1.19884 ++ rjmp 0b
1.19885 ++
1.19886 ++
1.19887 ++__avr32_f32_mul_op1_nan_or_inf:
1.19888 ++ /* Check if op1 is NaN, if so return NaN */
1.19889 ++ lsl r11, r8, 1
1.19890 ++ retne -1
1.19891 ++
1.19892 ++ /* op1 is Inf. */
1.19893 ++ tst r9, r9
1.19894 ++ reteq -1 /* Inf * 0 -> NaN */
1.19895 ++
1.19896 ++ bfins r12, r10, 23, 8 /* Generate Inf in r12 */
1.19897 ++
1.19898 ++ /* Check if op2 is Inf. or NaN */
1.19899 ++ lsr r11, r9, 23
1.19900 ++ cp.w r11, 0xff
1.19901 ++	retne	r12		/* op2 not Inf or NaN, return Inf */
1.19902 ++
1.19903 ++ lsl r9, 9
1.19904 ++ reteq r12 /* op2 Inf return Inf */
1.19905 ++ ret -1 /* op2 is NaN, return NaN */
1.19906 ++
1.19907 ++__avr32_f32_mul_res_subnormal:
1.19908 ++ /* Check if the number is so small that
1.19909 ++ it will be represented with zero. */
1.19910 ++ rsub r9, r9, 9
1.19911 ++ rsub r8, r9, 32
1.19912 ++ retcs 0
1.19913 ++
1.19914 ++ /* Shift the mantissa into the correct position.*/
1.19915 ++ lsr r9, r11, r9
1.19916 ++ /* Add sign bit. */
1.19917 ++ or r12, r9
1.19918 ++ /* Put the shifted out bits in the most significant part
1.19919 ++ of r8. */
1.19920 ++ lsl r11, r11, r8
1.19921 ++
1.19922 ++ /* Add all the remainder bits used for rounding into r11 */
1.19923 ++ andh r10, 0x00FF
1.19924 ++ or r10, r11
1.19925 ++ rjmp __avr32_f32_mul_round
1.19926 ++
1.19927 ++__avr32_f32_mul_op1_zero:
1.19928 ++ bfextu r10, r11, 23, 8
1.19929 ++ cp.w r10, 0xff
1.19930 ++ retne r12
1.19931 ++ reteq -1
1.19932 ++
1.19933 ++#endif
1.19934 ++
1.19935 ++
1.19936 ++#ifdef L_avr32_s32_to_f32
1.19937 ++ .global __avr32_s32_to_f32
1.19938 ++ .type __avr32_s32_to_f32,@function
1.19939 ++__avr32_s32_to_f32:
1.19940 ++ cp r12, 0
1.19941 ++ reteq r12 /* If zero then return zero float */
1.19942 ++ mov r11, r12 /* Keep the sign */
1.19943 ++ abs r12 /* Compute the absolute value */
1.19944 ++ mov r10, 31 + 127 /* Set the correct exponent */
1.19945 ++
1.19946 ++ /* Normalize */
1.19947 ++ normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
1.19948 ++
1.19949 ++ /* Check for subnormal result */
1.19950 ++ cp.w r10, 0
1.19951 ++ brle __avr32_s32_to_f32_subnormal
1.19952 ++
1.19953 ++ round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
1.19954 ++ pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
1.19955 ++ lsl r11, 1
1.19956 ++ ror r12
1.19957 ++ ret r12
1.19958 ++
1.19959 ++__avr32_s32_to_f32_subnormal:
1.19960 ++ /* Adjust a subnormal result */
1.19961 ++ adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
1.19962 ++ ret r12
1.19963 ++
1.19964 ++#endif
1.19965 ++
1.19966 ++#ifdef L_avr32_u32_to_f32
1.19967 ++ .global __avr32_u32_to_f32
1.19968 ++ .type __avr32_u32_to_f32,@function
1.19969 ++__avr32_u32_to_f32:
1.19970 ++ cp r12, 0
1.19971 ++ reteq r12 /* If zero then return zero float */
1.19972 ++ mov r10, 31 + 127 /* Set the correct exponent */
1.19973 ++
1.19974 ++ /* Normalize */
1.19975 ++ normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
1.19976 ++
1.19977 ++ /* Check for subnormal result */
1.19978 ++ cp.w r10, 0
1.19979 ++ brle __avr32_u32_to_f32_subnormal
1.19980 ++
1.19981 ++ round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
1.19982 ++ pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
1.19983 ++ lsr r12,1 /* Sign bit is 0 for unsigned int */
1.19984 ++ ret r12
1.19985 ++
1.19986 ++__avr32_u32_to_f32_subnormal:
1.19987 ++ /* Adjust a subnormal result */
1.19988 ++ mov r8, 0
1.19989 ++ adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
1.19990 ++ ret r12
1.19991 ++
1.19992 ++
1.19993 ++#endif
1.19994 ++
1.19995 ++
1.19996 ++#ifdef L_avr32_f32_to_s32
1.19997 ++ .global __avr32_f32_to_s32
1.19998 ++ .type __avr32_f32_to_s32,@function
1.19999 ++__avr32_f32_to_s32:
1.20000 ++ bfextu r11, r12, 23, 8
1.20001 ++ sub r11,127 /* Fix bias */
1.20002 ++ retlo 0 /* Negative exponent yields zero integer */
1.20003 ++
1.20004 ++ /* Shift mantissa into correct position */
1.20005 ++ rsub r11,r11,31 /* Shift amount */
1.20006 ++ lsl r10,r12,8 /* Get mantissa */
1.20007 ++ sbr r10,31 /* Add implicit bit */
1.20008 ++ lsr r10,r10,r11 /* Perform shift */
1.20009 ++ lsl r12,1 /* Check sign */
1.20010 ++ retcc r10 /* if positive, we are done */
1.20011 ++ neg r10 /* if negative float, negate result */
1.20012 ++ ret r10
1.20013 ++
1.20014 ++#endif
1.20015 ++
1.20016 ++#ifdef L_avr32_f32_to_u32
1.20017 ++ .global __avr32_f32_to_u32
1.20018 ++ .type __avr32_f32_to_u32,@function
1.20019 ++__avr32_f32_to_u32:
1.20020 ++ cp r12,0
1.20021 ++ retmi 0 /* Negative numbers gives 0 */
1.20022 ++ bfextu r11, r12, 23, 8 /* Extract exponent */
1.20023 ++ sub r11,127 /* Fix bias */
1.20024 ++ retlo 0 /* Negative exponent yields zero integer */
1.20025 ++
1.20026 ++ /* Shift mantissa into correct position */
1.20027 ++ rsub r11,r11,31 /* Shift amount */
1.20028 ++ lsl r12,8 /* Get mantissa */
1.20029 ++ sbr r12,31 /* Add implicit bit */
1.20030 ++ lsr r12,r12,r11 /* Perform shift */
1.20031 ++ ret r12
1.20032 ++
1.20033 ++#endif
1.20034 ++
1.20035 ++#ifdef L_avr32_f32_to_f64
1.20036 ++ .global __avr32_f32_to_f64
1.20037 ++ .type __avr32_f32_to_f64,@function
1.20038 ++
1.20039 ++__avr32_f32_to_f64:
1.20040 ++ lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/
1.20041 ++ moveq r10, 0
1.20042 ++ reteq r11 /* Return zero if input is zero */
1.20043 ++
1.20044 ++ bfextu r9,r11,24,8 /* Get exponent */
1.20045 ++ cp.w r9,0xff /* check for NaN or inf */
1.20046 ++ breq 0f
1.20047 ++
1.20048 ++ lsl r11,7 /* Convert sf mantissa to df format */
1.20049 ++ mov r10,0
1.20050 ++
1.20051 ++ /* Check if implicit bit should be set */
1.20052 ++ cp.w r9, 0
1.20053 ++ subeq r9,-1 /* Adjust exponent if it was 0 */
1.20054 ++ srne r8
1.20055 ++ or r11, r11, r8 << 31 /* Set implicit bit if needed */
1.20056 ++ sub r9,(127-0x3ff) /* Convert exponent to df format exponent */
1.20057 ++
1.20058 ++ /*We know that low register of mantissa is 0, and will be unaffected by normalization.*/
1.20059 ++ /*We can therefore use the faster normalize_sf function instead of normalize_df.*/
1.20060 ++ normalize_sf r9 /*exp*/, r11 /*mantissa*/, r8 /*scratch*/
1.20061 ++ pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
1.20062 ++
1.20063 ++__extendsfdf_return_op1:
1.20064 ++ /* Rotate in sign bit */
1.20065 ++ lsl r12, 1
1.20066 ++ ror r11
1.20067 ++ ret r11
1.20068 ++
1.20069 ++0:
1.20070 ++ /* Inf or NaN*/
1.20071 ++ mov_imm r10, 0xffe00000
1.20072 ++ lsl r11,8 /* check mantissa */
1.20073 ++ movne r11, -1 /* Return NaN */
1.20074 ++ moveq r11, r10 /* Return inf */
1.20075 ++ rjmp __extendsfdf_return_op1
1.20076 ++#endif
1.20077 ++
1.20078 ++
1.20079 ++#ifdef L_avr32_f64_to_f32
1.20080 ++ .global __avr32_f64_to_f32
1.20081 ++ .type __avr32_f64_to_f32,@function
1.20082 ++
1.20083 ++__avr32_f64_to_f32:
1.20084 ++ /* Unpack */
1.20085 ++ lsl r9,r11,1 /* Unpack exponent */
1.20086 ++ lsr r9,21
1.20087 ++
1.20088 ++ reteq 0 /* If exponent is 0 the number is so small
1.20089 ++ that the conversion to single float gives
1.20090 ++ zero */
1.20091 ++
1.20092 ++ lsl r8,r11,10 /* Adjust mantissa */
1.20093 ++ or r12,r8,r10>>22
1.20094 ++
1.20095 ++ lsl r10,10 /* Check if there are any remaining bits
1.20096 ++ in the low part of the mantissa.*/
1.20097 ++ neg r10
1.20098 ++ rol r12 /* If there were remaining bits then set lsb
1.20099 ++ of mantissa to 1 */
1.20100 ++
1.20101 ++ cp r9,0x7ff
1.20102 ++ breq 2f /* Check for NaN or inf */
1.20103 ++
1.20104 ++ sub r9,(0x3ff-127) /* Adjust bias of exponent */
1.20105 ++ sbr r12,31 /* set the implicit bit.*/
1.20106 ++
1.20107 ++ cp.w r9, 0 /* Check for subnormal number */
1.20108 ++ brle 3f
1.20109 ++
1.20110 ++ round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/
1.20111 ++ pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
1.20112 ++__truncdfsf_return_op1:
1.20113 ++ /* Rotate in sign bit */
1.20114 ++ lsl r11, 1
1.20115 ++ ror r12
1.20116 ++ ret r12
1.20117 ++
1.20118 ++2:
1.20119 ++ /* NaN or inf */
1.20120 ++ cbr r12,31 /* clear implicit bit */
1.20121 ++ retne -1 /* Return NaN if mantissa not zero */
1.20122 ++ mov_imm r12, 0xff000000
1.20123 ++ ret r12 /* Return inf */
1.20124 ++
1.20125 ++3: /* Result is subnormal. Adjust it.*/
1.20126 ++ adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
1.20127 ++ ret r12
1.20128 ++
1.20129 ++
1.20130 ++#endif
1.20131 ++
1.20132 ++#if defined(L_mulsi3) && (__AVR32_UC__ == 3)
1.20133 ++ .global __mulsi3
1.20134 ++ .type __mulsi3,@function
1.20135 ++
1.20136 ++__mulsi3:
1.20137 ++ mov r9, 0
1.20138 ++0:
1.20139 ++ lsr r11, 1
1.20140 ++ addcs r9, r9, r12
1.20141 ++ breq 1f
1.20142 ++ lsl r12, 1
1.20143 ++ rjmp 0b
1.20144 ++1:
1.20145 ++ ret r9
1.20146 ++#endif
1.20147 +--- /dev/null
1.20148 ++++ b/gcc/config/avr32/lib2funcs.S
1.20149 +@@ -0,0 +1,21 @@
1.20150 ++ .align 4
1.20151 ++ .global __nonlocal_goto
1.20152 ++ .type __nonlocal_goto,@function
1.20153 ++
1.20154 ++/* __nonlocal_goto: This function handles nonlocal_goto's in gcc.
1.20155 ++
1.20156 ++ parameter 0 (r12) = New Frame Pointer
1.20157 ++ parameter 1 (r11) = Address to goto
1.20158 ++ parameter 2 (r10) = New Stack Pointer
1.20159 ++
1.20160 ++ This function invalidates the return stack, since it returns from a
1.20161 ++ function without using a return instruction.
1.20162 ++*/
1.20163 ++__nonlocal_goto:
1.20164 ++ mov r7, r12
1.20165 ++ mov sp, r10
1.20166 ++ frs # Flush return stack
1.20167 ++ mov pc, r11
1.20168 ++
1.20169 ++
1.20170 ++
1.20171 +--- /dev/null
1.20172 ++++ b/gcc/config/avr32/linux-elf.h
1.20173 +@@ -0,0 +1,151 @@
1.20174 ++/*
1.20175 ++ Linux/Elf specific definitions.
1.20176 ++ Copyright 2003-2006 Atmel Corporation.
1.20177 ++
1.20178 ++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.20179 ++   and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
1.20180 ++
1.20181 ++ This file is part of GCC.
1.20182 ++
1.20183 ++ This program is free software; you can redistribute it and/or modify
1.20184 ++ it under the terms of the GNU General Public License as published by
1.20185 ++ the Free Software Foundation; either version 2 of the License, or
1.20186 ++ (at your option) any later version.
1.20187 ++
1.20188 ++ This program is distributed in the hope that it will be useful,
1.20189 ++ but WITHOUT ANY WARRANTY; without even the implied warranty of
1.20190 ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.20191 ++ GNU General Public License for more details.
1.20192 ++
1.20193 ++ You should have received a copy of the GNU General Public License
1.20194 ++ along with this program; if not, write to the Free Software
1.20195 ++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
1.20196 ++
1.20197 ++
1.20198 ++
1.20199 ++/* elfos.h should have already been included. Now just override
1.20200 ++ any conflicting definitions and add any extras. */
1.20201 ++
1.20202 ++/* Run-time Target Specification. */
1.20203 ++#undef TARGET_VERSION
1.20204 ++#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr);
1.20205 ++
1.20206 ++/* Do not assume anything about header files. */
1.20207 ++#define NO_IMPLICIT_EXTERN_C
1.20208 ++
1.20209 ++/* The GNU C++ standard library requires that these macros be defined. */
1.20210 ++#undef CPLUSPLUS_CPP_SPEC
1.20211 ++#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
1.20212 ++
1.20213 ++/* Now we define the strings used to build the spec file. */
1.20214 ++#undef LIB_SPEC
1.20215 ++#define LIB_SPEC \
1.20216 ++ "%{pthread:-lpthread} \
1.20217 ++ %{shared:-lc} \
1.20218 ++ %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
1.20219 ++
1.20220 ++/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
1.20221 ++ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
1.20222 ++ provides part of the support for getting C++ file-scope static
1.20223 ++ object constructed before entering `main'. */
1.20224 ++
1.20225 ++#undef STARTFILE_SPEC
1.20226 ++#define STARTFILE_SPEC \
1.20227 ++ "%{!shared: \
1.20228 ++ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
1.20229 ++ %{!p:%{profile:gcrt1.o%s} \
1.20230 ++ %{!profile:crt1.o%s}}}} \
1.20231 ++ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
1.20232 ++
1.20233 ++/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
1.20234 ++ the GNU/Linux magical crtend.o file (see crtstuff.c) which
1.20235 ++ provides part of the support for getting C++ file-scope static
1.20236 ++ object constructed before entering `main', followed by a normal
1.20237 ++ GNU/Linux "finalizer" file, `crtn.o'. */
1.20238 ++
1.20239 ++#undef ENDFILE_SPEC
1.20240 ++#define ENDFILE_SPEC \
1.20241 ++ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
1.20242 ++
1.20243 ++#undef ASM_SPEC
1.20244 ++#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
1.20245 ++
1.20246 ++#undef LINK_SPEC
1.20247 ++#define LINK_SPEC "%{version:-v} \
1.20248 ++ %{static:-Bstatic} \
1.20249 ++ %{shared:-shared} \
1.20250 ++ %{symbolic:-Bsymbolic} \
1.20251 ++ %{rdynamic:-export-dynamic} \
1.20252 ++ %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
1.20253 ++ %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
1.20254 ++
1.20255 ++#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
1.20256 ++
1.20257 ++/* This is how we tell the assembler that two symbols have the same value. */
1.20258 ++#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
1.20259 ++ do \
1.20260 ++ { \
1.20261 ++ assemble_name (FILE, NAME1); \
1.20262 ++ fputs (" = ", FILE); \
1.20263 ++ assemble_name (FILE, NAME2); \
1.20264 ++ fputc ('\n', FILE); \
1.20265 ++ } \
1.20266 ++ while (0)
1.20267 ++
1.20268 ++
1.20269 ++
1.20270 ++#undef CC1_SPEC
1.20271 ++#define CC1_SPEC "%{profile:-p}"
1.20272 ++
1.20273 ++/* Target CPU builtins. */
1.20274 ++#define TARGET_CPU_CPP_BUILTINS() \
1.20275 ++ do \
1.20276 ++ { \
1.20277 ++ builtin_define ("__avr32__"); \
1.20278 ++ builtin_define ("__AVR32__"); \
1.20279 ++ builtin_define ("__AVR32_LINUX__"); \
1.20280 ++ builtin_define (avr32_part->macro); \
1.20281 ++ builtin_define (avr32_arch->macro); \
1.20282 ++ if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
1.20283 ++ builtin_define ("__AVR32_AVR32A__"); \
1.20284 ++ else \
1.20285 ++ builtin_define ("__AVR32_AVR32B__"); \
1.20286 ++ if (TARGET_UNALIGNED_WORD) \
1.20287 ++ builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
1.20288 ++ if (TARGET_SIMD) \
1.20289 ++ builtin_define ("__AVR32_HAS_SIMD__"); \
1.20290 ++ if (TARGET_DSP) \
1.20291 ++ builtin_define ("__AVR32_HAS_DSP__"); \
1.20292 ++ if (TARGET_RMW) \
1.20293 ++ builtin_define ("__AVR32_HAS_RMW__"); \
1.20294 ++ if (TARGET_BRANCH_PRED) \
1.20295 ++ builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
1.20296 ++ if (TARGET_FAST_FLOAT) \
1.20297 ++ builtin_define ("__AVR32_FAST_FLOAT__"); \
1.20298 ++ } \
1.20299 ++ while (0)
1.20300 ++
1.20301 ++
1.20302 ++
1.20303 ++/* Call the function profiler with a given profile label. */
1.20304 ++#undef FUNCTION_PROFILER
1.20305 ++#define FUNCTION_PROFILER(STREAM, LABELNO) \
1.20306 ++ do \
1.20307 ++ { \
1.20308 ++ fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
1.20309 ++ fprintf (STREAM, "\ticall lr\n"); \
1.20310 ++ } \
1.20311 ++ while (0)
1.20312 ++
1.20313 ++#define NO_PROFILE_COUNTERS 1
1.20314 ++
1.20315 ++/* For dynamic libraries to work */
1.20316 ++/* #define PLT_REG_CALL_CLOBBERED 1 */
1.20317 ++#define AVR32_ALWAYS_PIC 1
1.20318 ++
1.20319 ++/* uclibc does not implement sinf, cosf etc. */
1.20320 ++#undef TARGET_C99_FUNCTIONS
1.20321 ++#define TARGET_C99_FUNCTIONS 0
1.20322 ++
1.20323 ++#define LINK_GCC_C_SEQUENCE_SPEC \
1.20324 ++ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
1.20325 +--- /dev/null
1.20326 ++++ b/gcc/config/avr32/predicates.md
1.20327 +@@ -0,0 +1,419 @@
1.20328 ++;; AVR32 predicates file.
1.20329 ++;; Copyright 2003-2006 Atmel Corporation.
1.20330 ++;;
1.20331 ++;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.20332 ++;;
1.20333 ++;; This file is part of GCC.
1.20334 ++;;
1.20335 ++;; This program is free software; you can redistribute it and/or modify
1.20336 ++;; it under the terms of the GNU General Public License as published by
1.20337 ++;; the Free Software Foundation; either version 2 of the License, or
1.20338 ++;; (at your option) any later version.
1.20339 ++;;
1.20340 ++;; This program is distributed in the hope that it will be useful,
1.20341 ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
1.20342 ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.20343 ++;; GNU General Public License for more details.
1.20344 ++;;
1.20345 ++;; You should have received a copy of the GNU General Public License
1.20346 ++;; along with this program; if not, write to the Free Software
1.20347 ++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
1.20348 ++
1.20349 ++
1.20350 ++;; True if the operand is a memory reference which contains an
1.20351 ++;; Address consisting of a single pointer register
1.20352 ++(define_predicate "avr32_indirect_register_operand"
1.20353 ++ (and (match_code "mem")
1.20354 ++ (match_test "register_operand(XEXP(op, 0), SImode)")))
1.20355 ++
1.20356 ++
1.20357 ++
1.20358 ++;; Address expression with a base pointer offset with
1.20359 ++;; a register displacement
1.20360 ++(define_predicate "avr32_indexed_memory_operand"
1.20361 ++ (and (match_code "mem")
1.20362 ++ (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
1.20363 ++ {
1.20364 ++
1.20365 ++ rtx op0 = XEXP(XEXP(op, 0), 0);
1.20366 ++ rtx op1 = XEXP(XEXP(op, 0), 1);
1.20367 ++
1.20368 ++ return ((avr32_address_register_rtx_p (op0, 0)
1.20369 ++ && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
1.20370 ++ || (avr32_address_register_rtx_p (op1, 0)
1.20371 ++ && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
1.20372 ++
1.20373 ++ })
1.20374 ++
1.20375 ++;; Operand suitable for the ld.sb instruction
1.20376 ++(define_predicate "load_sb_memory_operand"
1.20377 ++ (ior (match_operand 0 "avr32_indirect_register_operand")
1.20378 ++ (match_operand 0 "avr32_indexed_memory_operand")))
1.20379 ++
1.20380 ++
1.20381 ++;; Operand suitable as operand to insns sign extending QI values
1.20382 ++(define_predicate "extendqi_operand"
1.20383 ++ (ior (match_operand 0 "load_sb_memory_operand")
1.20384 ++ (match_operand 0 "register_operand")))
1.20385 ++
1.20386 ++(define_predicate "post_inc_memory_operand"
1.20387 ++ (and (match_code "mem")
1.20388 ++ (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
1.20389 ++ && REG_P(XEXP(XEXP(op, 0), 0))")))
1.20390 ++
1.20391 ++(define_predicate "pre_dec_memory_operand"
1.20392 ++ (and (match_code "mem")
1.20393 ++ (match_test "(GET_CODE(XEXP(op, 0)) == PRE_DEC)
1.20394 ++ && REG_P(XEXP(XEXP(op, 0), 0))")))
1.20395 ++
1.20396 ++;; Operand suitable for add instructions
1.20397 ++(define_predicate "avr32_add_operand"
1.20398 ++ (ior (match_operand 0 "register_operand")
1.20399 ++ (and (match_operand 0 "immediate_operand")
1.20400 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
1.20401 ++
1.20402 ++;; Operand is a power of two immediate
1.20403 ++(define_predicate "power_of_two_operand"
1.20404 ++ (match_code "const_int")
1.20405 ++{
1.20406 ++ HOST_WIDE_INT value = INTVAL (op);
1.20407 ++
1.20408 ++ return value != 0 && (value & (value - 1)) == 0;
1.20409 ++})
1.20410 ++
1.20411 ++;; Operand is a multiple of 8 immediate
1.20412 ++(define_predicate "multiple_of_8_operand"
1.20413 ++ (match_code "const_int")
1.20414 ++{
1.20415 ++ HOST_WIDE_INT value = INTVAL (op);
1.20416 ++
1.20417 ++ return (value & 0x7) == 0 ;
1.20418 ++})
1.20419 ++
1.20420 ++;; Operand is a multiple of 16 immediate
1.20421 ++(define_predicate "multiple_of_16_operand"
1.20422 ++ (match_code "const_int")
1.20423 ++{
1.20424 ++ HOST_WIDE_INT value = INTVAL (op);
1.20425 ++
1.20426 ++ return (value & 0xf) == 0 ;
1.20427 ++})
1.20428 ++
1.20429 ++;; Operand is a mask used for masking away upper bits of a reg
1.20430 ++(define_predicate "avr32_mask_upper_bits_operand"
1.20431 ++ (match_code "const_int")
1.20432 ++{
1.20433 ++ HOST_WIDE_INT value = INTVAL (op) + 1;
1.20434 ++
1.20435 ++ return value != 1 && value != 0 && (value & (value - 1)) == 0;
1.20436 ++})
1.20437 ++
1.20438 ++
1.20439 ++;; Operand suitable for mul instructions
1.20440 ++(define_predicate "avr32_mul_operand"
1.20441 ++ (ior (match_operand 0 "register_operand")
1.20442 ++ (and (match_operand 0 "immediate_operand")
1.20443 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
1.20444 ++
1.20445 ++;; True for logical binary operators.
1.20446 ++(define_predicate "logical_binary_operator"
1.20447 ++ (match_code "ior,xor,and"))
1.20448 ++
1.20449 ++;; True for logical shift operators
1.20450 ++(define_predicate "logical_shift_operator"
1.20451 ++ (match_code "ashift,lshiftrt"))
1.20452 ++
1.20453 ++;; True for shift operand for logical and, or and eor insns
1.20454 ++(define_predicate "avr32_logical_shift_operand"
1.20455 ++ (and (match_code "ashift,lshiftrt")
1.20456 ++ (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
1.20457 ++ (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
1.20458 ++ (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
1.20459 ++ (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
1.20460 ++ )
1.20461 ++
1.20462 ++
1.20463 ++;; Predicate for second operand to and, ior and xor insn patterns
1.20464 ++(define_predicate "avr32_logical_insn_operand"
1.20465 ++ (ior (match_operand 0 "register_operand")
1.20466 ++ (match_operand 0 "avr32_logical_shift_operand"))
1.20467 ++)
1.20468 ++
1.20469 ++
1.20470 ++;; True for avr32 comparison operators
1.20471 ++(define_predicate "avr32_comparison_operator"
1.20472 ++ (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
1.20473 ++ (and (match_code "unspec")
1.20474 ++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
1.20475 ++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
1.20476 ++
1.20477 ++(define_predicate "avr32_cond3_comparison_operator"
1.20478 ++ (ior (match_code "eq, ne, ge, lt, geu, ltu")
1.20479 ++ (and (match_code "unspec")
1.20480 ++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
1.20481 ++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
1.20482 ++
1.20483 ++;; True for avr32 comparison operand
1.20484 ++(define_predicate "avr32_comparison_operand"
1.20485 ++ (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
1.20486 ++ (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
1.20487 ++ (and (match_code "unspec")
1.20488 ++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
1.20489 ++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
1.20490 ++
1.20491 ++;; True if this is a const_int with one bit set
1.20492 ++(define_predicate "one_bit_set_operand"
1.20493 ++ (match_code "const_int")
1.20494 ++ {
1.20495 ++ int i;
1.20496 ++ int value;
1.20497 ++ int ones = 0;
1.20498 ++
1.20499 ++ value = INTVAL(op);
1.20500 ++ for ( i = 0 ; i < 32; i++ ){
1.20501 ++ if ( value & ( 1 << i ) ){
1.20502 ++ ones++;
1.20503 ++ }
1.20504 ++ }
1.20505 ++
1.20506 ++ return ( ones == 1 );
1.20507 ++ })
1.20508 ++
1.20509 ++
1.20510 ++;; True if this is a const_int with one bit cleared
1.20511 ++(define_predicate "one_bit_cleared_operand"
1.20512 ++ (match_code "const_int")
1.20513 ++ {
1.20514 ++ int i;
1.20515 ++ int value;
1.20516 ++ int zeroes = 0;
1.20517 ++
1.20518 ++ value = INTVAL(op);
1.20519 ++ for ( i = 0 ; i < 32; i++ ){
1.20520 ++ if ( !(value & ( 1 << i )) ){
1.20521 ++ zeroes++;
1.20522 ++ }
1.20523 ++ }
1.20524 ++
1.20525 ++ return ( zeroes == 1 );
1.20526 ++ })
1.20527 ++
1.20528 ++
1.20529 ++;; Immediate all the low 16-bits cleared
1.20530 ++(define_predicate "avr32_hi16_immediate_operand"
1.20531 ++ (match_code "const_int")
1.20532 ++ {
1.20533 ++ /* If the low 16-bits are zero then this
1.20534 ++ is a hi16 immediate. */
1.20535 ++ return ((INTVAL(op) & 0xffff) == 0);
1.20536 ++ }
1.20537 ++)
1.20538 ++
1.20539 ++;; True if this is a register or immediate operand
1.20540 ++(define_predicate "register_immediate_operand"
1.20541 ++ (ior (match_operand 0 "register_operand")
1.20542 ++ (match_operand 0 "immediate_operand")))
1.20543 ++
1.20544 ++;; True if this is a register or const_int operand
1.20545 ++(define_predicate "register_const_int_operand"
1.20546 ++ (ior (match_operand 0 "register_operand")
1.20547 ++ (and (match_operand 0 "const_int_operand")
1.20548 ++ (match_operand 0 "immediate_operand"))))
1.20549 ++
1.20550 ++;; True if this is a register or const_double operand
1.20551 ++(define_predicate "register_const_double_operand"
1.20552 ++ (ior (match_operand 0 "register_operand")
1.20553 ++ (match_operand 0 "const_double_operand")))
1.20554 ++
1.20555 ++;; True if this is an operand containing a label_ref
1.20556 ++(define_predicate "avr32_label_ref_operand"
1.20557 ++ (and (match_code "mem")
1.20558 ++ (match_test "avr32_find_symbol(op)
1.20559 ++ && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
1.20560 ++
1.20561 ++;; True if this is a valid symbol pointing to the constant pool
1.20562 ++(define_predicate "avr32_const_pool_operand"
1.20563 ++ (and (match_code "symbol_ref")
1.20564 ++ (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
1.20565 ++ {
1.20566 ++ return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
1.20567 ++ || label_mentioned_p (get_pool_constant (op)))
1.20568 ++ || avr32_got_mentioned_p(get_pool_constant (op)))
1.20569 ++ : true);
1.20570 ++ }
1.20571 ++)
1.20572 ++
1.20573 ++;; True if this is a memory reference to the constant or mini pool
1.20574 ++(define_predicate "avr32_const_pool_ref_operand"
1.20575 ++ (ior (match_operand 0 "avr32_label_ref_operand")
1.20576 ++ (and (match_code "mem")
1.20577 ++ (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
1.20578 ++
1.20579 ++
1.20580 ++;; Legal source operand for movti insns
1.20581 ++(define_predicate "avr32_movti_src_operand"
1.20582 ++ (ior (match_operand 0 "avr32_const_pool_ref_operand")
1.20583 ++ (ior (ior (match_operand 0 "register_immediate_operand")
1.20584 ++ (match_operand 0 "avr32_indirect_register_operand"))
1.20585 ++ (match_operand 0 "post_inc_memory_operand"))))
1.20586 ++
1.20587 ++;; Legal destination operand for movti insns
1.20588 ++(define_predicate "avr32_movti_dst_operand"
1.20589 ++ (ior (ior (match_operand 0 "register_operand")
1.20590 ++ (match_operand 0 "avr32_indirect_register_operand"))
1.20591 ++ (match_operand 0 "pre_dec_memory_operand")))
1.20592 ++
1.20593 ++
1.20594 ++;; True if this is a k12 offset memory operand (register base, or
1.20595 ++(define_predicate "avr32_k12_memory_operand"
1.20596 ++  (and (match_code "mem")
1.20597 ++       (ior (match_test "REG_P(XEXP(op, 0))")
1.20598 ++            (match_test "GET_CODE(XEXP(op, 0)) == PLUS
1.20599 ++                         && REG_P(XEXP(XEXP(op, 0), 0))
1.20600 ++                         && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
1.20601 ++                         && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)),
1.20602 ++                                                       'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
1.20603 ++
1.20604 ++;; True if this is a memory operand with an immediate displacement
1.20605 ++(define_predicate "avr32_imm_disp_memory_operand"
1.20606 ++ (and (match_code "mem")
1.20607 ++ (match_test "GET_CODE(XEXP(op, 0)) == PLUS
1.20608 ++ && REG_P(XEXP(XEXP(op, 0), 0))
1.20609 ++ && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
1.20610 ++
1.20611 ++;; True if this is a bswap operand
1.20612 ++(define_predicate "avr32_bswap_operand"
1.20613 ++ (ior (match_operand 0 "avr32_k12_memory_operand")
1.20614 ++ (match_operand 0 "register_operand")))
1.20615 ++
1.20616 ++;; True if this is a valid coprocessor insn memory operand
1.20617 ++(define_predicate "avr32_cop_memory_operand"
1.20618 ++ (and (match_operand 0 "memory_operand")
1.20619 ++ (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
1.20620 ++ && REG_P(XEXP(XEXP(op, 0), 0))
1.20621 ++ && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
1.20622 ++ && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)), 'K', \"Ku10\"))"))))
1.20623 ++
1.20624 ++;; True if this is a valid source/destination operand
1.20625 ++;; for moving values to/from a coprocessor
1.20626 ++(define_predicate "avr32_cop_move_operand"
1.20627 ++ (ior (match_operand 0 "register_operand")
1.20628 ++ (match_operand 0 "avr32_cop_memory_operand")))
1.20629 ++
1.20630 ++
1.20631 ++;; True if this is a valid extract byte offset for use in
1.20632 ++;; load extracted index insns
1.20633 ++(define_predicate "avr32_extract_shift_operand"
1.20634 ++ (and (match_operand 0 "const_int_operand")
1.20635 ++ (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
1.20636 ++ || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
1.20637 ++
1.20638 ++;; True if this is a floating-point register
1.20639 ++(define_predicate "avr32_fp_register_operand"
1.20640 ++ (and (match_operand 0 "register_operand")
1.20641 ++ (match_test "REGNO_REG_CLASS(REGNO(op)) == FP_REGS")))
1.20642 ++
1.20643 ++;; True if this is a valid avr32 symbol operand
1.20644 ++(define_predicate "avr32_symbol_operand"
1.20645 ++ (and (match_code "label_ref, symbol_ref, const")
1.20646 ++ (match_test "avr32_find_symbol(op)")))
1.20647 ++
1.20648 ++;; True if this is a valid operand for the lda.w and call pseudo insns
1.20649 ++(define_predicate "avr32_address_operand"
1.20650 ++ (and (and (match_code "label_ref, symbol_ref")
1.20651 ++ (match_test "avr32_find_symbol(op)"))
1.20652 ++ (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
1.20653 ++ (match_test "flag_pic")) ))
1.20654 ++
1.20655 ++;; An immediate k16 address operand
1.20656 ++(define_predicate "avr32_ks16_address_operand"
1.20657 ++ (and (match_operand 0 "address_operand")
1.20658 ++ (ior (match_test "REG_P(op)")
1.20659 ++ (match_test "GET_CODE(op) == PLUS
1.20660 ++ && ((GET_CODE(XEXP(op,0)) == CONST_INT)
1.20661 ++ || (GET_CODE(XEXP(op,1)) == CONST_INT))")) ))
1.20662 ++
1.20663 ++;; An offset k16 memory operand
1.20664 ++(define_predicate "avr32_ks16_memory_operand"
1.20665 ++ (and (match_code "mem")
1.20666 ++ (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
1.20667 ++
1.20668 ++;; An immediate k11 address operand
1.20669 ++(define_predicate "avr32_ks11_address_operand"
1.20670 ++ (and (match_operand 0 "address_operand")
1.20671 ++ (ior (match_test "REG_P(op)")
1.20672 ++ (match_test "GET_CODE(op) == PLUS
1.20673 ++ && (((GET_CODE(XEXP(op,0)) == CONST_INT)
1.20674 ++ && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\"))
1.20675 ++ || ((GET_CODE(XEXP(op,1)) == CONST_INT)
1.20676 ++ && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) ))
1.20677 ++
1.20678 ++;; True if this is a avr32 call operand
1.20679 ++(define_predicate "avr32_call_operand"
1.20680 ++ (ior (ior (match_operand 0 "register_operand")
1.20681 ++ (ior (match_operand 0 "avr32_const_pool_ref_operand")
1.20682 ++ (match_operand 0 "avr32_address_operand")))
1.20683 ++ (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
1.20684 ++
1.20685 ++;; Return true for operators performing ALU operations
1.20686 ++
1.20687 ++(define_predicate "alu_operator"
1.20688 ++ (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
1.20689 ++
1.20690 ++(define_predicate "avr32_add_shift_immediate_operand"
1.20691 ++ (and (match_operand 0 "immediate_operand")
1.20692 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
1.20693 ++
1.20694 ++(define_predicate "avr32_cond_register_immediate_operand"
1.20695 ++ (ior (match_operand 0 "register_operand")
1.20696 ++ (and (match_operand 0 "immediate_operand")
1.20697 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
1.20698 ++
1.20699 ++(define_predicate "avr32_cond_immediate_operand"
1.20700 ++ (and (match_operand 0 "immediate_operand")
1.20701 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")")))
1.20702 ++
1.20703 ++
1.20704 ++(define_predicate "avr32_cond_move_operand"
1.20705 ++ (ior (ior (match_operand 0 "register_operand")
1.20706 ++ (and (match_operand 0 "immediate_operand")
1.20707 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))
1.20708 ++ (and (match_test "TARGET_V2_INSNS")
1.20709 ++ (match_operand 0 "memory_operand"))))
1.20710 ++
1.20711 ++(define_predicate "avr32_mov_immediate_operand"
1.20712 ++ (and (match_operand 0 "immediate_operand")
1.20713 ++ (match_test "avr32_const_ok_for_move(INTVAL(op))")))
1.20714 ++
1.20715 ++
1.20716 ++(define_predicate "avr32_rmw_address_operand"
1.20717 ++ (ior (and (match_code "symbol_ref")
1.20718 ++ (match_test "({rtx symbol = avr32_find_symbol(op); \
1.20719 ++ symbol && (GET_CODE (symbol) == SYMBOL_REF) && SYMBOL_REF_RMW_ADDR(symbol);})"))
1.20720 ++ (and (match_operand 0 "immediate_operand")
1.20721 ++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks17\")")))
1.20722 ++ {
1.20723 ++ return TARGET_RMW && !flag_pic;
1.20724 ++ }
1.20725 ++)
1.20726 ++
1.20727 ++(define_predicate "avr32_rmw_memory_operand"
1.20728 ++ (and (match_code "mem")
1.20729 ++ (match_test "(GET_MODE(op) == SImode) &&
1.20730 ++ avr32_rmw_address_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")))
1.20731 ++
1.20732 ++(define_predicate "avr32_rmw_memory_or_register_operand"
1.20733 ++ (ior (match_operand 0 "avr32_rmw_memory_operand")
1.20734 ++ (match_operand 0 "register_operand")))
1.20735 ++
1.20736 ++(define_predicate "avr32_non_rmw_memory_operand"
1.20737 ++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
1.20738 ++ (match_operand 0 "memory_operand")))
1.20739 ++
1.20740 ++(define_predicate "avr32_non_rmw_general_operand"
1.20741 ++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
1.20742 ++ (match_operand 0 "general_operand")))
1.20743 ++
1.20744 ++(define_predicate "avr32_non_rmw_nonimmediate_operand"
1.20745 ++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
1.20746 ++ (match_operand 0 "nonimmediate_operand")))
1.20747 +--- /dev/null
1.20748 ++++ b/gcc/config/avr32/simd.md
1.20749 +@@ -0,0 +1,145 @@
1.20750 ++;; AVR32 machine description file for SIMD instructions.
1.20751 ++;; Copyright 2003-2006 Atmel Corporation.
1.20752 ++;;
1.20753 ++;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
1.20754 ++;;
1.20755 ++;; This file is part of GCC.
1.20756 ++;;
1.20757 ++;; This program is free software; you can redistribute it and/or modify
1.20758 ++;; it under the terms of the GNU General Public License as published by
1.20759 ++;; the Free Software Foundation; either version 2 of the License, or
1.20760 ++;; (at your option) any later version.
1.20761 ++;;
1.20762 ++;; This program is distributed in the hope that it will be useful,
1.20763 ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
1.20764 ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.20765 ++;; GNU General Public License for more details.
1.20766 ++;;
1.20767 ++;; You should have received a copy of the GNU General Public License
1.20768 ++;; along with this program; if not, write to the Free Software
1.20769 ++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
1.20770 ++
1.20771 ++;; -*- Mode: Scheme -*-
1.20772 ++
1.20773 ++
1.20774 ++;; Vector modes
1.20775 ++(define_mode_macro VECM [V2HI V4QI])
1.20776 ++(define_mode_attr size [(V2HI "h") (V4QI "b")])
1.20777 ++
1.20778 ++(define_insn "add<mode>3"
1.20779 ++ [(set (match_operand:VECM 0 "register_operand" "=r")
1.20780 ++ (plus:VECM (match_operand:VECM 1 "register_operand" "r")
1.20781 ++ (match_operand:VECM 2 "register_operand" "r")))]
1.20782 ++ "TARGET_SIMD"
1.20783 ++ "padd.<size>\t%0, %1, %2"
1.20784 ++ [(set_attr "length" "4")
1.20785 ++ (set_attr "type" "alu")])
1.20786 ++
1.20787 ++
1.20788 ++(define_insn "sub<mode>3"
1.20789 ++ [(set (match_operand:VECM 0 "register_operand" "=r")
1.20790 ++ (minus:VECM (match_operand:VECM 1 "register_operand" "r")
1.20791 ++ (match_operand:VECM 2 "register_operand" "r")))]
1.20792 ++ "TARGET_SIMD"
1.20793 ++ "psub.<size>\t%0, %1, %2"
1.20794 ++ [(set_attr "length" "4")
1.20795 ++ (set_attr "type" "alu")])
1.20796 ++
1.20797 ++
1.20798 ++(define_insn "abs<mode>2"
1.20799 ++ [(set (match_operand:VECM 0 "register_operand" "=r")
1.20800 ++ (abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
1.20801 ++ "TARGET_SIMD"
1.20802 ++ "pabs.s<size>\t%0, %1"
1.20803 ++ [(set_attr "length" "4")
1.20804 ++ (set_attr "type" "alu")])
1.20805 ++
1.20806 ++(define_insn "ashl<mode>3"
1.20807 ++ [(set (match_operand:VECM 0 "register_operand" "=r")
1.20808 ++ (ashift:VECM (match_operand:VECM 1 "register_operand" "r")
1.20809 ++ (match_operand:SI 2 "immediate_operand" "Ku04")))]
1.20810 ++ "TARGET_SIMD"
1.20811 ++ "plsl.<size>\t%0, %1, %2"
1.20812 ++ [(set_attr "length" "4")
1.20813 ++ (set_attr "type" "alu")])
1.20814 ++
1.20815 ++(define_insn "ashr<mode>3"
1.20816 ++ [(set (match_operand:VECM 0 "register_operand" "=r")
1.20817 ++ (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
1.20818 ++ (match_operand:SI 2 "immediate_operand" "Ku04")))]
1.20819 ++ "TARGET_SIMD"
1.20820 ++ "pasr.<size>\t%0, %1, %2"
1.20821 ++ [(set_attr "length" "4")
1.20822 ++ (set_attr "type" "alu")])
1.20823 ++
1.20824 ++(define_insn "lshr<mode>3"
1.20825 ++ [(set (match_operand:VECM 0 "register_operand" "=r")
1.20826 ++ (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
1.20827 ++ (match_operand:SI 2 "immediate_operand" "Ku04")))]
1.20828 ++ "TARGET_SIMD"
1.20829 ++ "plsr.<size>\t%0, %1, %2"
1.20830 ++ [(set_attr "length" "4")
1.20831 ++ (set_attr "type" "alu")])
1.20832 ++
1.20833 ++(define_insn "smaxv2hi3"
1.20834 ++ [(set (match_operand:V2HI 0 "register_operand" "=r")
1.20835 ++ (smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
1.20836 ++ (match_operand:V2HI 2 "register_operand" "r")))]
1.20837 ++
1.20838 ++ "TARGET_SIMD"
1.20839 ++ "pmax.sh\t%0, %1, %2"
1.20840 ++ [(set_attr "length" "4")
1.20841 ++ (set_attr "type" "alu")])
1.20842 ++
1.20843 ++(define_insn "sminv2hi3"
1.20844 ++ [(set (match_operand:V2HI 0 "register_operand" "=r")
1.20845 ++ (smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
1.20846 ++ (match_operand:V2HI 2 "register_operand" "r")))]
1.20847 ++
1.20848 ++ "TARGET_SIMD"
1.20849 ++ "pmin.sh\t%0, %1, %2"
1.20850 ++ [(set_attr "length" "4")
1.20851 ++ (set_attr "type" "alu")])
1.20852 ++
1.20853 ++(define_insn "umaxv4qi3"
1.20854 ++ [(set (match_operand:V4QI 0 "register_operand" "=r")
1.20855 ++ (umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
1.20856 ++ (match_operand:V4QI 2 "register_operand" "r")))]
1.20857 ++
1.20858 ++ "TARGET_SIMD"
1.20859 ++ "pmax.ub\t%0, %1, %2"
1.20860 ++ [(set_attr "length" "4")
1.20861 ++ (set_attr "type" "alu")])
1.20862 ++
1.20863 ++(define_insn "uminv4qi3"
1.20864 ++ [(set (match_operand:V4QI 0 "register_operand" "=r")
1.20865 ++ (umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
1.20866 ++ (match_operand:V4QI 2 "register_operand" "r")))]
1.20867 ++
1.20868 ++ "TARGET_SIMD"
1.20869 ++ "pmin.ub\t%0, %1, %2"
1.20870 ++ [(set_attr "length" "4")
1.20871 ++ (set_attr "type" "alu")])
1.20872 ++
1.20873 ++
1.20874 ++(define_insn "addsubv2hi"
1.20875 ++ [(set (match_operand:V2HI 0 "register_operand" "=r")
1.20876 ++ (vec_concat:V2HI
1.20877 ++ (plus:HI (match_operand:HI 1 "register_operand" "r")
1.20878 ++ (match_operand:HI 2 "register_operand" "r"))
1.20879 ++ (minus:HI (match_dup 1) (match_dup 2))))]
1.20880 ++ "TARGET_SIMD"
1.20881 ++ "paddsub.h\t%0, %1:b, %2:b"
1.20882 ++ [(set_attr "length" "4")
1.20883 ++ (set_attr "type" "alu")])
1.20884 ++
1.20885 ++(define_insn "subaddv2hi"
1.20886 ++ [(set (match_operand:V2HI 0 "register_operand" "=r")
1.20887 ++ (vec_concat:V2HI
1.20888 ++ (minus:HI (match_operand:HI 1 "register_operand" "r")
1.20889 ++ (match_operand:HI 2 "register_operand" "r"))
1.20890 ++ (plus:HI (match_dup 1) (match_dup 2))))]
1.20891 ++ "TARGET_SIMD"
1.20892 ++ "psubadd.h\t%0, %1:b, %2:b"
1.20893 ++ [(set_attr "length" "4")
1.20894 ++ (set_attr "type" "alu")])
1.20895 +--- /dev/null
1.20896 ++++ b/gcc/config/avr32/sync.md
1.20897 +@@ -0,0 +1,244 @@
1.20898 ++;;=================================================================
1.20899 ++;; Atomic operations
1.20900 ++;;=================================================================
1.20901 ++
1.20902 ++
1.20903 ++(define_insn "sync_compare_and_swapsi"
1.20904 ++ [(set (match_operand:SI 0 "register_operand" "=&r,&r")
1.20905 ++ (match_operand:SI 1 "memory_operand" "+RKs16,+RKs16"))
1.20906 ++ (set (match_dup 1)
1.20907 ++ (unspec_volatile:SI
1.20908 ++ [(match_dup 1)
1.20909 ++ (match_operand:SI 2 "register_immediate_operand" "r,Ks21")
1.20910 ++ (match_operand:SI 3 "register_operand" "r,r")]
1.20911 ++ VUNSPEC_SYNC_CMPXCHG)) ]
1.20912 ++ ""
1.20913 ++ "0:
1.20914 ++ ssrf\t5
1.20915 ++ ld.w\t%0,%1
1.20916 ++ cp.w\t%0,%2
1.20917 ++ brne\t0f
1.20918 ++ stcond\t%1, %3
1.20919 ++ brne\t0b
1.20920 ++ 0:
1.20921 ++ "
1.20922 ++ [(set_attr "length" "16,18")
1.20923 ++ (set_attr "cc" "clobber")]
1.20924 ++ )
1.20925 ++
1.20926 ++
1.20927 ++(define_code_macro atomic_op [plus minus and ior xor])
1.20928 ++(define_code_attr atomic_asm_insn [(plus "add") (minus "sub") (and "and") (ior "or") (xor "eor")])
1.20929 ++(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")])
1.20930 ++
1.20931 ++(define_insn "sync_loadsi"
1.20932 ++ ; NB! Put an early clobber on the destination operand to
1.20933 ++ ; avoid gcc using the same register in the source and
1.20934 ++ ; destination. This is done in order to avoid gcc to
1.20935 ++ ; clobber the source operand since these instructions
1.20936 ++ ; are actually inside a "loop".
1.20937 ++ [(set (match_operand:SI 0 "register_operand" "=&r")
1.20938 ++ (unspec_volatile:SI
1.20939 ++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16")
1.20940 ++ (label_ref (match_operand 2 "" ""))]
1.20941 ++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )]
1.20942 ++ ""
1.20943 ++ "%2:
1.20944 ++ ssrf\t5
1.20945 ++ ld.w\t%0,%1"
1.20946 ++ [(set_attr "length" "6")
1.20947 ++ (set_attr "cc" "clobber")]
1.20948 ++ )
1.20949 ++
1.20950 ++(define_insn "sync_store_if_lock"
1.20951 ++ [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16")
1.20952 ++ (unspec_volatile:SI
1.20953 ++ [(match_operand:SI 1 "register_operand" "r")
1.20954 ++ (label_ref (match_operand 2 "" ""))]
1.20955 ++ VUNSPEC_SYNC_STORE_IF_LOCK) )]
1.20956 ++ ""
1.20957 ++ "stcond\t%0, %1
1.20958 ++ brne\t%2"
1.20959 ++ [(set_attr "length" "6")
1.20960 ++ (set_attr "cc" "clobber")]
1.20961 ++ )
1.20962 ++
1.20963 ++
1.20964 ++(define_expand "sync_<atomic_insn>si"
1.20965 ++ [(set (match_dup 2)
1.20966 ++ (unspec_volatile:SI
1.20967 ++ [(match_operand:SI 0 "avr32_ks16_memory_operand" "")
1.20968 ++ (match_dup 3)]
1.20969 ++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
1.20970 ++ (set (match_dup 2)
1.20971 ++ (atomic_op:SI (match_dup 2)
1.20972 ++ (match_operand:SI 1 "register_immediate_operand" "")))
1.20973 ++ (set (match_dup 0)
1.20974 ++ (unspec_volatile:SI
1.20975 ++ [(match_dup 2)
1.20976 ++ (match_dup 3)]
1.20977 ++ VUNSPEC_SYNC_STORE_IF_LOCK) )
1.20978 ++ (use (match_dup 1))
1.20979 ++ (use (match_dup 4))]
1.20980 ++ ""
1.20981 ++ {
1.20982 ++ rtx *mem_expr = &operands[0];
1.20983 ++ rtx ptr_reg;
1.20984 ++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
1.20985 ++ {
1.20986 ++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
1.20987 ++ XEXP (*mem_expr, 0) = ptr_reg;
1.20988 ++ }
1.20989 ++ else
1.20990 ++ {
1.20991 ++ rtx address = XEXP (*mem_expr, 0);
1.20992 ++ if ( REG_P (address) )
1.20993 ++ ptr_reg = address;
1.20994 ++ else if ( REG_P (XEXP (address, 0)) )
1.20995 ++ ptr_reg = XEXP (address, 0);
1.20996 ++ else
1.20997 ++ ptr_reg = XEXP (address, 1);
1.20998 ++ }
1.20999 ++
1.21000 ++ operands[2] = gen_reg_rtx (SImode);
1.21001 ++ operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
1.21002 ++ operands[4] = ptr_reg;
1.21003 ++
1.21004 ++ }
1.21005 ++ )
1.21006 ++
1.21007 ++
1.21008 ++
1.21009 ++(define_expand "sync_old_<atomic_insn>si"
1.21010 ++ [(set (match_operand:SI 0 "register_operand" "")
1.21011 ++ (unspec_volatile:SI
1.21012 ++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
1.21013 ++ (match_dup 4)]
1.21014 ++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
1.21015 ++ (set (match_dup 3)
1.21016 ++ (atomic_op:SI (match_dup 0)
1.21017 ++ (match_operand:SI 2 "register_immediate_operand" "")))
1.21018 ++ (set (match_dup 1)
1.21019 ++ (unspec_volatile:SI
1.21020 ++ [(match_dup 3)
1.21021 ++ (match_dup 4)]
1.21022 ++ VUNSPEC_SYNC_STORE_IF_LOCK) )
1.21023 ++ (use (match_dup 2))
1.21024 ++ (use (match_dup 5))]
1.21025 ++ ""
1.21026 ++ {
1.21027 ++ rtx *mem_expr = &operands[1];
1.21028 ++ rtx ptr_reg;
1.21029 ++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
1.21030 ++ {
1.21031 ++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
1.21032 ++ XEXP (*mem_expr, 0) = ptr_reg;
1.21033 ++ }
1.21034 ++ else
1.21035 ++ {
1.21036 ++ rtx address = XEXP (*mem_expr, 0);
1.21037 ++ if ( REG_P (address) )
1.21038 ++ ptr_reg = address;
1.21039 ++ else if ( REG_P (XEXP (address, 0)) )
1.21040 ++ ptr_reg = XEXP (address, 0);
1.21041 ++ else
1.21042 ++ ptr_reg = XEXP (address, 1);
1.21043 ++ }
1.21044 ++
1.21045 ++ operands[3] = gen_reg_rtx (SImode);
1.21046 ++ operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
1.21047 ++ operands[5] = ptr_reg;
1.21048 ++ }
1.21049 ++ )
1.21050 ++
1.21051 ++(define_expand "sync_new_<atomic_insn>si"
1.21052 ++ [(set (match_operand:SI 0 "register_operand" "")
1.21053 ++ (unspec_volatile:SI
1.21054 ++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
1.21055 ++ (match_dup 3)]
1.21056 ++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
1.21057 ++ (set (match_dup 0)
1.21058 ++ (atomic_op:SI (match_dup 0)
1.21059 ++ (match_operand:SI 2 "register_immediate_operand" "")))
1.21060 ++ (set (match_dup 1)
1.21061 ++ (unspec_volatile:SI
1.21062 ++ [(match_dup 0)
1.21063 ++ (match_dup 3)]
1.21064 ++ VUNSPEC_SYNC_STORE_IF_LOCK) )
1.21065 ++ (use (match_dup 2))
1.21066 ++ (use (match_dup 4))]
1.21067 ++ ""
1.21068 ++ {
1.21069 ++ rtx *mem_expr = &operands[1];
1.21070 ++ rtx ptr_reg;
1.21071 ++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
1.21072 ++ {
1.21073 ++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
1.21074 ++ XEXP (*mem_expr, 0) = ptr_reg;
1.21075 ++ }
1.21076 ++ else
1.21077 ++ {
1.21078 ++ rtx address = XEXP (*mem_expr, 0);
1.21079 ++ if ( REG_P (address) )
1.21080 ++ ptr_reg = address;
1.21081 ++ else if ( REG_P (XEXP (address, 0)) )
1.21082 ++ ptr_reg = XEXP (address, 0);
1.21083 ++ else
1.21084 ++ ptr_reg = XEXP (address, 1);
1.21085 ++ }
1.21086 ++
1.21087 ++ operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
1.21088 ++ operands[4] = ptr_reg;
1.21089 ++ }
1.21090 ++ )
1.21091 ++
1.21092 ++
1.21093 ++;(define_insn "sync_<atomic_insn>si"
1.21094 ++; [(set (match_operand:SI 0 "memory_operand" "+RKs16")
1.21095 ++; (unspec_volatile:SI
1.21096 ++; [(atomic_op:SI (match_dup 0)
1.21097 ++; (match_operand:SI 1 "register_operand" "r"))]
1.21098 ++; VUNSPEC_SYNC_CMPXCHG))
1.21099 ++; (clobber (match_scratch:SI 2 "=&r"))]
1.21100 ++; ""
1.21101 ++; "0:
1.21102 ++; ssrf\t5
1.21103 ++; ld.w\t%2,%0
1.21104 ++; <atomic_asm_insn>\t%2,%1
1.21105 ++; stcond\t%0, %2
1.21106 ++; brne\t0b
1.21107 ++; "
1.21108 ++; [(set_attr "length" "14")
1.21109 ++; (set_attr "cc" "clobber")]
1.21110 ++; )
1.21111 ++;
1.21112 ++;(define_insn "sync_new_<atomic_insn>si"
1.21113 ++; [(set (match_operand:SI 1 "memory_operand" "+RKs16")
1.21114 ++; (unspec_volatile:SI
1.21115 ++; [(atomic_op:SI (match_dup 1)
1.21116 ++; (match_operand:SI 2 "register_operand" "r"))]
1.21117 ++; VUNSPEC_SYNC_CMPXCHG))
1.21118 ++; (set (match_operand:SI 0 "register_operand" "=&r")
1.21119 ++; (atomic_op:SI (match_dup 1)
1.21120 ++; (match_dup 2)))]
1.21121 ++; ""
1.21122 ++; "0:
1.21123 ++; ssrf\t5
1.21124 ++; ld.w\t%0,%1
1.21125 ++; <atomic_asm_insn>\t%0,%2
1.21126 ++; stcond\t%1, %0
1.21127 ++; brne\t0b
1.21128 ++; "
1.21129 ++; [(set_attr "length" "14")
1.21130 ++; (set_attr "cc" "clobber")]
1.21131 ++; )
1.21132 ++
1.21133 ++(define_insn "sync_lock_test_and_setsi"
1.21134 ++ [ (set (match_operand:SI 0 "register_operand" "=&r")
1.21135 ++ (match_operand:SI 1 "memory_operand" "+RKu00"))
1.21136 ++ (set (match_dup 1)
1.21137 ++ (match_operand:SI 2 "register_operand" "r")) ]
1.21138 ++ ""
1.21139 ++ "xchg\t%0, %p1, %2"
1.21140 ++ [(set_attr "length" "4")]
1.21141 ++ )
1.21142 +--- /dev/null
1.21143 ++++ b/gcc/config/avr32/t-avr32
1.21144 +@@ -0,0 +1,77 @@
1.21145 ++
1.21146 ++MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
1.21147 ++ $(srcdir)/config/avr32/sync.md \
1.21148 ++ $(srcdir)/config/avr32/fpcp.md \
1.21149 ++ $(srcdir)/config/avr32/simd.md \
1.21150 ++ $(srcdir)/config/avr32/predicates.md
1.21151 ++
1.21152 ++s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
1.21153 ++ s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
1.21154 ++
1.21155 ++# We want fine grained libraries, so use the new code
1.21156 ++# to build the floating point emulation libraries.
1.21157 ++FPBIT = fp-bit.c
1.21158 ++DPBIT = dp-bit.c
1.21159 ++
1.21160 ++LIB1ASMSRC = avr32/lib1funcs.S
1.21161 ++LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
1.21162 ++ _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
1.21163 ++ _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
1.21164 ++ _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
1.21165 ++ _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
1.21166 ++ _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
1.21167 ++ _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
1.21168 ++
1.21169 ++#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
1.21170 ++
1.21171 ++MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul
1.21172 ++MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul
1.21173 ++MULTILIB_EXCEPTIONS =
1.21174 ++MULTILIB_MATCHES += march?ap=mpart?ap7000
1.21175 ++MULTILIB_MATCHES += march?ap=mpart?ap7001
1.21176 ++MULTILIB_MATCHES += march?ap=mpart?ap7002
1.21177 ++MULTILIB_MATCHES += march?ap=mpart?ap7200
1.21178 ++MULTILIB_MATCHES += march?ucr1=march?uc
1.21179 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
1.21180 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
1.21181 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
1.21182 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
1.21183 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
1.21184 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
1.21185 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
1.21186 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
1.21187 ++MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
1.21188 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
1.21189 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
1.21190 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
1.21191 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
1.21192 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
1.21193 ++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
1.21194 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
1.21195 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
1.21196 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
1.21197 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
1.21198 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
1.21199 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
1.21200 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
1.21201 ++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
1.21202 ++
1.21203 ++
1.21204 ++EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
1.21205 ++
1.21206 ++CRTSTUFF_T_CFLAGS = -mrelax
1.21207 ++CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
1.21208 ++TARGET_LIBGCC2_CFLAGS += -mrelax
1.21209 ++
1.21210 ++LIBGCC = stmp-multilib
1.21211 ++INSTALL_LIBGCC = install-multilib
1.21212 ++
1.21213 ++fp-bit.c: $(srcdir)/config/fp-bit.c
1.21214 ++ echo '#define FLOAT' > fp-bit.c
1.21215 ++ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
1.21216 ++
1.21217 ++dp-bit.c: $(srcdir)/config/fp-bit.c
1.21218 ++ cat $(srcdir)/config/fp-bit.c > dp-bit.c
1.21219 ++
1.21220 ++
1.21221 ++
1.21222 +--- /dev/null
1.21223 ++++ b/gcc/config/avr32/t-elf
1.21224 +@@ -0,0 +1,16 @@
1.21225 ++
1.21226 ++# Assemble startup files.
1.21227 ++$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
1.21228 ++ $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
1.21229 ++ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
1.21230 ++
1.21231 ++$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
1.21232 ++ $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
1.21233 ++ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
1.21234 ++
1.21235 ++
1.21236 ++# Build the libraries for both hard and soft floating point
1.21237 ++EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
1.21238 ++
1.21239 ++LIBGCC = stmp-multilib
1.21240 ++INSTALL_LIBGCC = install-multilib
1.21241 +--- /dev/null
1.21242 ++++ b/gcc/config/avr32/uclinux-elf.h
1.21243 +@@ -0,0 +1,20 @@
1.21244 ++
1.21245 ++/* Run-time Target Specification. */
1.21246 ++#undef TARGET_VERSION
1.21247 ++#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr)
1.21248 ++
1.21249 ++/* We don't want a .jcr section on uClinux. As if this makes a difference... */
1.21250 ++#define TARGET_USE_JCR_SECTION 0
1.21251 ++
1.21252 ++/* Here we go. Drop the crtbegin/crtend stuff completely. */
1.21253 ++#undef STARTFILE_SPEC
1.21254 ++#define STARTFILE_SPEC \
1.21255 ++ "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \
1.21256 ++ " %{!p:%{profile:gcrt1.o%s}" \
1.21257 ++ " %{!profile:crt1.o%s}}}} crti.o%s"
1.21258 ++
1.21259 ++#undef ENDFILE_SPEC
1.21260 ++#define ENDFILE_SPEC "crtn.o%s"
1.21261 ++
1.21262 ++#undef TARGET_DEFAULT
1.21263 ++#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
1.21264 +--- a/gcc/config/host-linux.c
1.21265 ++++ b/gcc/config/host-linux.c
1.21266 +@@ -25,6 +25,9 @@
1.21267 + #include "hosthooks.h"
1.21268 + #include "hosthooks-def.h"
1.21269 +
1.21270 ++#ifndef SSIZE_MAX
1.21271 ++#define SSIZE_MAX LONG_MAX
1.21272 ++#endif
1.21273 +
1.21274 + /* Linux has a feature called exec-shield-randomize that perturbs the
1.21275 + address of non-fixed mapped segments by a (relatively) small amount.
1.21276 +--- a/gcc/config.gcc
1.21277 ++++ b/gcc/config.gcc
1.21278 +@@ -781,6 +781,24 @@ avr-*-*)
1.21279 + tm_file="avr/avr.h dbxelf.h"
1.21280 + use_fixproto=yes
1.21281 + ;;
1.21282 ++avr32*-*-linux*)
1.21283 ++ tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
1.21284 ++ tmake_file="t-linux avr32/t-avr32 avr32/t-elf"
1.21285 ++ extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
1.21286 ++ extra_modes=avr32/avr32-modes.def
1.21287 ++ gnu_ld=yes
1.21288 ++ ;;
1.21289 ++avr32*-*-uclinux*)
1.21290 ++ tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
1.21291 ++ tmake_file="t-linux avr32/t-avr32 avr32/t-elf"
1.21292 ++ extra_modes=avr32/avr32-modes.def
1.21293 ++ gnu_ld=yes
1.21294 ++ ;;
1.21295 ++avr32-*-*)
1.21296 ++ tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
1.21297 ++ tmake_file="avr32/t-avr32 avr32/t-elf"
1.21298 ++ extra_modes=avr32/avr32-modes.def
1.21299 ++ ;;
1.21300 + bfin*-elf*)
1.21301 + tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
1.21302 + tmake_file=bfin/t-bfin-elf
1.21303 +@@ -1681,6 +1699,9 @@ pdp11-*-bsd)
1.21304 + pdp11-*-*)
1.21305 + use_fixproto=yes
1.21306 + ;;
1.21307 ++avr-*-*)
1.21308 ++ use_fixproto=yes
1.21309 ++ ;;
1.21310 + # port not yet contributed
1.21311 + #powerpc-*-openbsd*)
1.21312 + # tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-fprules-fpbit "
1.21313 +@@ -2717,6 +2738,32 @@ case "${target}" in
1.21314 + fi
1.21315 + ;;
1.21316 +
1.21317 ++ avr32*-*-*)
1.21318 ++ supported_defaults="part arch"
1.21319 ++
1.21320 ++ case "$with_part" in
1.21321 ++ "" \
1.21322 ++ | "ap7000" | "ap7010" | "ap7020" | "uc3a0256" | "uc3a0512" | "uc3a1128" | "uc3a1256" | "uc3a1512" )
1.21323 ++ # OK
1.21324 ++ ;;
1.21325 ++ *)
1.21326 ++ echo "Unknown part used in --with-part=$with_part" 1>&2
1.21327 ++ exit 1
1.21328 ++ ;;
1.21329 ++ esac
1.21330 ++
1.21331 ++ case "$with_arch" in
1.21332 ++ "" \
1.21333 ++ | "ap" | "uc")
1.21334 ++ # OK
1.21335 ++ ;;
1.21336 ++ *)
1.21337 ++ echo "Unknown arch used in --with-arch=$with_arch" 1>&2
1.21338 ++ exit 1
1.21339 ++ ;;
1.21340 ++ esac
1.21341 ++ ;;
1.21342 ++
1.21343 + fr*-*-*linux*)
1.21344 + supported_defaults=cpu
1.21345 + case "$with_cpu" in
1.21346 +--- a/gcc/doc/extend.texi
1.21347 ++++ b/gcc/doc/extend.texi
1.21348 +@@ -1981,7 +1981,7 @@ this attribute to work correctly.
1.21349 +
1.21350 + @item interrupt
1.21351 + @cindex interrupt handler functions
1.21352 +-Use this attribute on the ARM, AVR, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16
1.21353 ++Use this attribute on the ARM, AVR, AVR32, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16
1.21354 + ports to indicate that the specified function is an interrupt handler.
1.21355 + The compiler will generate function entry and exit sequences suitable
1.21356 + for use in an interrupt handler when this attribute is present.
1.21357 +@@ -2000,6 +2000,15 @@ void f () __attribute__ ((interrupt ("IR
1.21358 +
1.21359 + Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
1.21360 +
1.21361 ++Note, for the AVR32, you can specify which banking scheme is used for
1.21362 ++the interrupt mode this interrupt handler is used in like this:
1.21363 ++
1.21364 ++@smallexample
1.21365 ++void f () __attribute__ ((interrupt ("FULL")));
1.21366 ++@end smallexample
1.21367 ++
1.21368 ++Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
1.21369 ++
1.21370 + @item interrupt_handler
1.21371 + @cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors
1.21372 + Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to
1.21373 +@@ -3460,6 +3469,23 @@ placed in either the @code{.bss_below100
1.21374 +
1.21375 + @end table
1.21376 +
1.21377 ++@subsection AVR32 Variable Attributes
1.21378 ++
1.21379 ++One attribute is currently defined for AVR32 configurations:
1.21380 ++@code{rmw_addressable}
1.21381 ++
1.21382 ++@table @code
1.21383 ++@item rmw_addressable
1.21384 ++@cindex @code{rmw_addressable} attribute
1.21385 ++
1.21386 ++This attribute can be used to signal that a variable can be accessed
1.21387 ++with the addressing mode of the AVR32 Atomic Read-Modify-Write memory
1.21388 ++instructions and hence make it possible for gcc to generate these
1.21389 ++instructions without using built-in functions or inline assembly statements.
1.21390 ++Variables used within the AVR32 Atomic Read-Modify-Write built-in
1.21391 ++functions will automatically get the @code{rmw_addressable} attribute.
1.21392 ++@end table
1.21393 ++
1.21394 + @node Type Attributes
1.21395 + @section Specifying Attributes of Types
1.21396 + @cindex attribute of types
1.21397 +@@ -6167,6 +6193,7 @@ instructions, but allow the compiler to
1.21398 + @menu
1.21399 + * Alpha Built-in Functions::
1.21400 + * ARM Built-in Functions::
1.21401 ++* AVR32 Built-in Functions::
1.21402 + * Blackfin Built-in Functions::
1.21403 + * FR-V Built-in Functions::
1.21404 + * X86 Built-in Functions::
1.21405 +@@ -6405,6 +6432,76 @@ long long __builtin_arm_wxor (long long,
1.21406 + long long __builtin_arm_wzero ()
1.21407 + @end smallexample
1.21408 +
1.21409 ++@node AVR32 Built-in Functions
1.21410 ++@subsection AVR32 Built-in Functions
1.21411 ++
1.21412 ++
1.21413 ++
1.21414 ++Built-in functions for atomic memory (RMW) instructions. Note that these
1.21415 ++built-ins will fail for targets where the RMW instructions are not
1.21416 ++implemented. Also note that these instructions only take a Ks15 << 2
1.21417 ++memory address and will therefore not work with any runtime computed
1.21418 ++memory addresses. The user is responsible for making sure that any
1.21419 ++pointers used within these functions point to a valid memory address.
1.21420 ++
1.21421 ++@smallexample
1.21422 ++void __builtin_mems(int */*ptr*/, int /*bit*/)
1.21423 ++void __builtin_memc(int */*ptr*/, int /*bit*/)
1.21424 ++void __builtin_memt(int */*ptr*/, int /*bit*/)
1.21425 ++@end smallexample
1.21426 ++
1.21427 ++Built-in functions for DSP instructions. Note that these built-ins will
1.21428 ++fail for targets where the DSP instructions are not implemented.
1.21429 ++
1.21430 ++@smallexample
1.21431 ++int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
1.21432 ++int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
1.21433 ++int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
1.21434 ++int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
1.21435 ++short __builtin_mulsathh_h (short, short)
1.21436 ++int __builtin_mulsathh_w (short, short)
1.21437 ++short __builtin_mulsatrndhh_h (short, short)
1.21438 ++int __builtin_mulsatrndwh_w (int, short)
1.21439 ++int __builtin_mulsatwh_w (int, short)
1.21440 ++int __builtin_macsathh_w (int, short, short)
1.21441 ++short __builtin_satadd_h (short, short)
1.21442 ++short __builtin_satsub_h (short, short)
1.21443 ++int __builtin_satadd_w (int, int)
1.21444 ++int __builtin_satsub_w (int, int)
1.21445 ++long long __builtin_mulwh_d(int, short)
1.21446 ++long long __builtin_mulnwh_d(int, short)
1.21447 ++long long __builtin_macwh_d(long long, int, short)
1.21448 ++long long __builtin_machh_d(long long, short, short)
1.21449 ++@end smallexample
1.21450 ++
1.21451 ++Other built-in functions for instructions that cannot easily be
1.21452 ++generated by the compiler.
1.21453 ++
1.21454 ++@smallexample
1.21455 ++void __builtin_ssrf(int);
1.21456 ++void __builtin_csrf(int);
1.21457 ++void __builtin_musfr(int);
1.21458 ++int __builtin_mustr(void);
1.21459 ++int __builtin_mfsr(int /*Status Register Address*/)
1.21460 ++void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
1.21461 ++int __builtin_mfdr(int /*Debug Register Address*/)
1.21462 ++void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
1.21463 ++void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
1.21464 ++void __builtin_sync(int /*Sync Operation*/)
1.21465 ++void __builtin_tlbr(void)
1.21466 ++void __builtin_tlbs(void)
1.21467 ++void __builtin_tlbw(void)
1.21468 ++void __builtin_breakpoint(void)
1.21469 ++int __builtin_xchg(void * /*Address*/, int /*Value*/ )
1.21470 ++short __builtin_bswap_16(short)
1.21471 ++int __builtin_bswap_32(int)
1.21472 ++void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
1.21473 ++int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
1.21474 ++void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
1.21475 ++long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
1.21476 ++void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
1.21477 ++@end smallexample
1.21478 ++
1.21479 + @node Blackfin Built-in Functions
1.21480 + @subsection Blackfin Built-in Functions
1.21481 +
1.21482 +--- a/gcc/doc/invoke.texi
1.21483 ++++ b/gcc/doc/invoke.texi
1.21484 +@@ -190,7 +190,7 @@ in the following sections.
1.21485 + -fno-default-inline -fvisibility-inlines-hidden @gol
1.21486 + -Wabi -Wctor-dtor-privacy @gol
1.21487 + -Wnon-virtual-dtor -Wreorder @gol
1.21488 +--Weffc++ -Wno-deprecated -Wstrict-null-sentinel @gol
1.21489 ++-Weffc++ -Wno-deprecated @gol
1.21490 + -Wno-non-template-friend -Wold-style-cast @gol
1.21491 + -Woverloaded-virtual -Wno-pmf-conversions @gol
1.21492 + -Wsign-promo}
1.21493 +@@ -588,6 +588,12 @@ Objective-C and Objective-C++ Dialects}.
1.21494 + -mauto-incdec -minmax -mlong-calls -mshort @gol
1.21495 + -msoft-reg-count=@var{count}}
1.21496 +
1.21497 ++@emph{AVR32 Options}
1.21498 ++@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
1.21499 ++-mforce-double-align -mno-init-got -mrelax -mmd-reorg-opt -masm-addr-pseudos @gol
1.21500 ++-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol
1.21501 ++-mfast-float -mimm-in-const-pool}
1.21502 ++
1.21503 + @emph{MCore Options}
1.21504 + @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
1.21505 + -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
1.21506 +@@ -1868,14 +1874,6 @@ to filter out those warnings.
1.21507 + @opindex Wno-deprecated
1.21508 + Do not warn about usage of deprecated features. @xref{Deprecated Features}.
1.21509 +
1.21510 +-@item -Wstrict-null-sentinel @r{(C++ only)}
1.21511 +-@opindex Wstrict-null-sentinel
1.21512 +-Warn also about the use of an uncasted @code{NULL} as sentinel. When
1.21513 +-compiling only with GCC this is a valid sentinel, as @code{NULL} is defined
1.21514 +-to @code{__null}. Although it is a null pointer constant not a null pointer,
1.21515 +-it is guaranteed to of the same size as a pointer. But this use is
1.21516 +-not portable across different compilers.
1.21517 +-
1.21518 + @item -Wno-non-template-friend @r{(C++ only)}
1.21519 + @opindex Wno-non-template-friend
1.21520 + Disable warnings when non-templatized friend functions are declared
1.21521 +@@ -2732,13 +2730,11 @@ requiring @option{-O}.
1.21522 + If you want to warn about code which uses the uninitialized value of the
1.21523 + variable in its own initializer, use the @option{-Winit-self} option.
1.21524 +
1.21525 +-These warnings occur for individual uninitialized or clobbered
1.21526 +-elements of structure, union or array variables as well as for
1.21527 +-variables which are uninitialized or clobbered as a whole. They do
1.21528 +-not occur for variables or elements declared @code{volatile}. Because
1.21529 +-these warnings depend on optimization, the exact variables or elements
1.21530 +-for which there are warnings will depend on the precise optimization
1.21531 +-options and version of GCC used.
1.21532 ++These warnings occur only for variables that are candidates for
1.21533 ++register allocation. Therefore, they do not occur for a variable that
1.21534 ++is declared @code{volatile}, or whose address is taken, or whose size
1.21535 ++is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
1.21536 ++structures, unions or arrays, even when they are in registers.
1.21537 +
1.21538 + Note that there may be no warning about a variable that is used only
1.21539 + to compute a value that itself is never used, because such
1.21540 +@@ -6201,10 +6197,6 @@ If number of candidates in the set is sm
1.21541 + we always try to remove unnecessary ivs from the set during its
1.21542 + optimization when a new iv is added to the set.
1.21543 +
1.21544 +-@item scev-max-expr-size
1.21545 +-Bound on size of expressions used in the scalar evolutions analyzer.
1.21546 +-Large expressions slow the analyzer.
1.21547 +-
1.21548 + @item vect-max-version-checks
1.21549 + The maximum number of runtime checks that can be performed when doing
1.21550 + loop versioning in the vectorizer. See option ftree-vect-loop-version
1.21551 +@@ -7402,7 +7394,7 @@ platform.
1.21552 + * ARC Options::
1.21553 + * ARM Options::
1.21554 + * AVR Options::
1.21555 +-* Blackfin Options::
1.21556 ++* AVR32 Options::
1.21557 + * CRIS Options::
1.21558 + * CRX Options::
1.21559 + * Darwin Options::
1.21560 +@@ -7867,81 +7859,80 @@ comply to the C standards, but it will p
1.21561 + size.
1.21562 + @end table
1.21563 +
1.21564 +-@node Blackfin Options
1.21565 +-@subsection Blackfin Options
1.21566 +-@cindex Blackfin Options
1.21567 ++@node AVR32 Options
1.21568 ++@subsection AVR32 Options
1.21569 ++@cindex AVR32 Options
1.21570 ++
1.21571 ++These options are defined for AVR32 implementations:
1.21572 +
1.21573 + @table @gcctabopt
1.21574 +-@item -momit-leaf-frame-pointer
1.21575 +-@opindex momit-leaf-frame-pointer
1.21576 +-Don't keep the frame pointer in a register for leaf functions. This
1.21577 +-avoids the instructions to save, set up and restore frame pointers and
1.21578 +-makes an extra register available in leaf functions. The option
1.21579 +-@option{-fomit-frame-pointer} removes the frame pointer for all functions
1.21580 +-which might make debugging harder.
1.21581 ++@item -muse-rodata-section
1.21582 ++@opindex muse-rodata-section
1.21583 ++Use section @samp{.rodata} for read-only data instead of @samp{.text}.
1.21584 +
1.21585 +-@item -mspecld-anomaly
1.21586 +-@opindex mspecld-anomaly
1.21587 +-When enabled, the compiler will ensure that the generated code does not
1.21588 +-contain speculative loads after jump instructions. This option is enabled
1.21589 +-by default.
1.21590 +-
1.21591 +-@item -mno-specld-anomaly
1.21592 +-@opindex mno-specld-anomaly
1.21593 +-Don't generate extra code to prevent speculative loads from occurring.
1.21594 +-
1.21595 +-@item -mcsync-anomaly
1.21596 +-@opindex mcsync-anomaly
1.21597 +-When enabled, the compiler will ensure that the generated code does not
1.21598 +-contain CSYNC or SSYNC instructions too soon after conditional branches.
1.21599 +-This option is enabled by default.
1.21600 +-
1.21601 +-@item -mno-csync-anomaly
1.21602 +-@opindex mno-csync-anomaly
1.21603 +-Don't generate extra code to prevent CSYNC or SSYNC instructions from
1.21604 +-occurring too soon after a conditional branch.
1.21605 +-
1.21606 +-@item -mlow-64k
1.21607 +-@opindex mlow-64k
1.21608 +-When enabled, the compiler is free to take advantage of the knowledge that
1.21609 +-the entire program fits into the low 64k of memory.
1.21610 +-
1.21611 +-@item -mno-low-64k
1.21612 +-@opindex mno-low-64k
1.21613 +-Assume that the program is arbitrarily large. This is the default.
1.21614 ++@item -mhard-float
1.21615 ++@opindex mhard-float
1.21616 ++Use floating point coprocessor instructions.
1.21617 +
1.21618 +-@item -mid-shared-library
1.21619 +-@opindex mid-shared-library
1.21620 +-Generate code that supports shared libraries via the library ID method.
1.21621 +-This allows for execute in place and shared libraries in an environment
1.21622 +-without virtual memory management. This option implies @option{-fPIC}.
1.21623 ++@item -msoft-float
1.21624 ++@opindex msoft-float
1.21625 ++Use software floating-point library for floating-point operations.
1.21626 +
1.21627 +-@item -mno-id-shared-library
1.21628 +-@opindex mno-id-shared-library
1.21629 +-Generate code that doesn't assume ID based shared libraries are being used.
1.21630 +-This is the default.
1.21631 ++@item -mforce-double-align
1.21632 ++@opindex mforce-double-align
1.21633 ++Force double-word alignment for double-word memory accesses.
1.21634 ++
1.21635 ++@item -mno-init-got
1.21636 ++@opindex mno-init-got
1.21637 ++Do not initialize the GOT register before using it when compiling PIC
1.21638 ++code.
1.21639 +
1.21640 +-@item -mshared-library-id=n
1.21641 +-@opindex mshared-library-id
1.21642 +-Specified the identification number of the ID based shared library being
1.21643 +-compiled. Specifying a value of 0 will generate more compact code, specifying
1.21644 +-other values will force the allocation of that number to the current
1.21645 +-library but is no more space or time efficient than omitting this option.
1.21646 ++@item -mrelax
1.21647 ++@opindex mrelax
1.21648 ++Let invoked assembler and linker do relaxing
1.21649 ++(Enabled by default when optimization level is >1).
1.21650 ++This means that when the address of symbols are known at link time,
1.21651 ++the linker can optimize @samp{icall} and @samp{mcall}
1.21652 ++instructions into a @samp{rcall} instruction if possible.
1.21653 ++Loading the address of a symbol can also be optimized.
1.21654 ++
1.21655 ++@item -mmd-reorg-opt
1.21656 ++@opindex mmd-reorg-opt
1.21657 ++Perform machine dependent optimizations in reorg stage.
1.21658 ++
1.21659 ++@item -masm-addr-pseudos
1.21660 ++@opindex masm-addr-pseudos
1.21661 ++Use assembler pseudo-instructions lda.w and call for handling direct
1.21662 ++addresses. (Enabled by default)
1.21663 ++
1.21664 ++@item -mpart=@var{part}
1.21665 ++@opindex mpart
1.21666 ++Generate code for the specified part. Permissible parts are:
1.21667 ++@samp{ap7000}, @samp{ap7010},@samp{ap7020},
1.21668 ++@samp{uc3a0128}, @samp{uc3a0256}, @samp{uc3a0512},
1.21669 ++@samp{uc3a1128}, @samp{uc3a1256}, @samp{uc3a1512},
1.21670 ++@samp{uc3b064}, @samp{uc3b0128}, @samp{uc3b0256},
1.21671 ++@samp{uc3b164}, @samp{uc3b1128}, @samp{uc3b1256}.
1.21672 +
1.21673 +-@item -mlong-calls
1.21674 +-@itemx -mno-long-calls
1.21675 +-@opindex mlong-calls
1.21676 +-@opindex mno-long-calls
1.21677 +-Tells the compiler to perform function calls by first loading the
1.21678 +-address of the function into a register and then performing a subroutine
1.21679 +-call on this register. This switch is needed if the target function
1.21680 +-will lie outside of the 24 bit addressing range of the offset based
1.21681 +-version of subroutine call instruction.
1.21682 ++@item -mcpu=@var{cpu-type}
1.21683 ++@opindex mcpu
1.21684 ++Same as -mpart. Obsolete.
1.21685 ++
1.21686 ++@item -march=@var{arch}
1.21687 ++@opindex march
1.21688 ++Generate code for the specified architecture. Permissible architectures are:
1.21689 ++@samp{ap} and @samp{uc}.
1.21690 ++
1.21691 ++@item -mfast-float
1.21692 ++@opindex mfast-float
1.21693 ++Enable fast floating-point library that does not conform to ieee but is still good enough
1.21694 ++for most applications. The fast floating-point library does not round to the nearest even
1.21695 ++but away from zero. Enabled by default if the -funsafe-math-optimizations switch is specified.
1.21696 ++
1.21697 ++@item -mimm-in-const-pool
1.21698 ++@opindex mimm-in-const-pool
1.21699 ++Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
1.21700 +
1.21701 +-This feature is not enabled by default. Specifying
1.21702 +-@option{-mno-long-calls} will restore the default behavior. Note these
1.21703 +-switches have no effect on how the compiler generates code to handle
1.21704 +-function calls via function pointers.
1.21705 + @end table
1.21706 +
1.21707 + @node CRIS Options
1.21708 +--- a/gcc/doc/md.texi
1.21709 ++++ b/gcc/doc/md.texi
1.21710 +@@ -1681,6 +1681,80 @@ A memory reference suitable for iWMMXt l
1.21711 + A memory reference suitable for the ARMv4 ldrsb instruction.
1.21712 + @end table
1.21713 +
1.21714 ++@item AVR32 family---@file{avr32.h}
1.21715 ++@table @code
1.21716 ++@item f
1.21717 ++Floating-point registers (f0 to f15) (Reserved for future use)
1.21718 ++
1.21719 ++@item Ku@var{bits}
1.21720 ++Unsigned constant representable with @var{bits} number of bits (Must be
1.21721 ++two digits). I.e: An unsigned 8-bit constant is written as @samp{Ku08}
1.21722 ++
1.21723 ++@item Ks@var{bits}
1.21724 ++Signed constant representable with @var{bits} number of bits (Must be
1.21725 ++two digits). I.e: A signed 12-bit constant is written as @samp{Ks12}
1.21726 ++
1.21727 ++@item Is@var{bits}
1.21728 ++The negated range of a signed constant representable with @var{bits}
1.21729 ++number of bits. The same as @samp{Ks@var{bits}} with a negated range.
1.21730 ++This means that the constant must be in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}
1.21731 ++
1.21732 ++@item G
1.21733 ++A single/double precision floating-point immediate or 64-bit integer
1.21734 ++immediate where the least and most significant words both can be
1.21735 ++loaded with a move instruction. That is, the integer form of the
1.21736 ++values in the least and most significant words both are in the range
1.21737 ++@math{-2^{20}} to @math{2^{20}-1}.
1.21738 ++
1.21739 ++@item M
1.21740 ++Any 32-bit immediate with the most significant bits set to zero and the
1.21741 ++remaining least significant bits set to one.
1.21742 ++
1.21743 ++@item J
1.21744 ++A 32-bit immediate where all the lower 16-bits are zero.
1.21745 ++
1.21746 ++@item O
1.21747 ++A 32-bit immediate with one bit set and the rest of the bits cleared.
1.21748 ++
1.21749 ++@item N
1.21750 ++A 32-bit immediate with one bit cleared and the rest of the bits set.
1.21751 ++
1.21752 ++@item L
1.21753 ++A 32-bit immediate where all the lower 16-bits are set.
1.21754 ++
1.21755 ++@item Q
1.21756 ++Any AVR32 memory reference except for reference used for the atomic memory (RMW) instructions.
1.21757 ++
1.21758 ++@item RKs@var{bits}
1.21759 ++A memory reference where the address consists of a base register
1.21760 ++plus a signed immediate displacement with range given by @samp{Ks@var{bits}}
1.21761 ++which has the same format as for the signed immediate integer constraint
1.21762 ++given above.
1.21763 ++
1.21764 ++@item RKu@var{bits}
1.21765 ++A memory reference where the address consists of a base register
1.21766 ++plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}}
1.21767 ++which has the same format as for the unsigned immediate integer constraint
1.21768 ++given above.
1.21769 ++
1.21770 ++@item S
1.21771 ++A memory reference with an immediate or register offset
1.21772 ++
1.21773 ++@item T
1.21774 ++A memory reference to a constant pool entry
1.21775 ++
1.21776 ++@item W
1.21777 ++A valid operand for use in the @samp{lda.w} instruction macro when
1.21778 ++relaxing is enabled
1.21779 ++
1.21780 ++@item Y
1.21781 ++A memory reference suitable for the atomic memory (RMW) instructions.
1.21782 ++
1.21783 ++@item Z
1.21784 ++A memory reference valid for coprocessor memory instructions
1.21785 ++
1.21786 ++@end table
1.21787 ++
1.21788 + @item AVR family---@file{config/avr/constraints.md}
1.21789 + @table @code
1.21790 + @item l
1.21791 +--- a/gcc/expmed.c
1.21792 ++++ b/gcc/expmed.c
1.21793 +@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3.
1.21794 + #include "real.h"
1.21795 + #include "recog.h"
1.21796 + #include "langhooks.h"
1.21797 ++#include "target.h"
1.21798 +
1.21799 + static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
1.21800 + unsigned HOST_WIDE_INT,
1.21801 +@@ -454,9 +455,19 @@ store_bit_field (rtx str_rtx, unsigned H
1.21802 + ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
1.21803 + || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
1.21804 + && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
1.21805 +- : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
1.21806 +- || (offset * BITS_PER_UNIT % bitsize == 0
1.21807 +- && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
1.21808 ++ : (
1.21809 ++
1.21810 ++ /* NB! Added for AVR32, and I think this should be true for
1.21811 ++ all targets not using narrow volatile bitfields. If the
1.21812 ++ bitfield is volatile then we need to perform an access
1.21813 ++ consistent with the container type. */
1.21814 ++ !(MEM_VOLATILE_P (op0)
1.21815 ++ && GET_MODE_BITSIZE (GET_MODE (op0)) != bitsize
1.21816 ++ && bitsize < BITS_PER_WORD
1.21817 ++ && !targetm.narrow_volatile_bitfield ())
1.21818 ++ && (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
1.21819 ++ || (offset * BITS_PER_UNIT % bitsize == 0
1.21820 ++ && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))))
1.21821 + {
1.21822 + if (MEM_P (op0))
1.21823 + op0 = adjust_address (op0, fieldmode, offset);
1.21824 +@@ -1256,6 +1267,13 @@ extract_bit_field (rtx str_rtx, unsigned
1.21825 + && GET_MODE_SIZE (mode1) != 0
1.21826 + && byte_offset % GET_MODE_SIZE (mode1) == 0)
1.21827 + || (MEM_P (op0)
1.21828 ++ /* NB! Added for AVR32, and I think this should be true for
1.21829 ++ all targets not using narrow volatile bitfields. If the
1.21830 ++ bitfield is volatile then we need to perform an access
1.21831 ++ consistent with the container type. */
1.21832 ++ && !(MEM_VOLATILE_P (op0)
1.21833 ++ && GET_MODE_BITSIZE (GET_MODE (op0)) != bitsize
1.21834 ++ && !targetm.narrow_volatile_bitfield ())
1.21835 + && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1.21836 + || (offset * BITS_PER_UNIT % bitsize == 0
1.21837 + && MEM_ALIGN (op0) % bitsize == 0)))))
1.21838 +--- a/gcc/expr.c
1.21839 ++++ b/gcc/expr.c
1.21840 +@@ -3519,18 +3519,19 @@ emit_single_push_insn (enum machine_mode
1.21841 + }
1.21842 + else
1.21843 + {
1.21844 ++ emit_move_insn (stack_pointer_rtx,
1.21845 ++ expand_binop (Pmode,
1.21846 + #ifdef STACK_GROWS_DOWNWARD
1.21847 +- /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
1.21848 +- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
1.21849 +- GEN_INT (-(HOST_WIDE_INT) rounded_size));
1.21850 ++ sub_optab,
1.21851 + #else
1.21852 +- /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
1.21853 +- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
1.21854 +- GEN_INT (rounded_size));
1.21855 ++ add_optab,
1.21856 + #endif
1.21857 +- dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
1.21858 ++ stack_pointer_rtx,
1.21859 ++ GEN_INT (rounded_size),
1.21860 ++ NULL_RTX, 0, OPTAB_LIB_WIDEN));
1.21861 ++ dest_addr = stack_pointer_rtx;
1.21862 + }
1.21863 +-
1.21864 ++
1.21865 + dest = gen_rtx_MEM (mode, dest_addr);
1.21866 +
1.21867 + if (type != 0)
1.21868 +@@ -5509,7 +5510,21 @@ store_field (rtx target, HOST_WIDE_INT b
1.21869 + is a bit field, we cannot use addressing to access it.
1.21870 + Use bit-field techniques or SUBREG to store in it. */
1.21871 +
1.21872 +- if (mode == VOIDmode
1.21873 ++ if (
1.21874 ++ /* NB! Added for AVR32, and I think this should be true for
1.21875 ++ all targets not using narrow volatile bitfields. If the
1.21876 ++ bitfield is volatile then we need to perform an access
1.21877 ++ consistent with the container type. */
1.21878 ++ (MEM_P (target)
1.21879 ++ && MEM_VOLATILE_P (target)
1.21880 ++ && ((GET_MODE (target) != BLKmode
1.21881 ++ && GET_MODE_BITSIZE (GET_MODE (target)) > bitsize )
1.21882 ++ /* If BLKmode, check if this is a record. Do not know
1.21883 ++ if this is really necessary though...*/
1.21884 ++ || (GET_MODE (target) == BLKmode
1.21885 ++ && TREE_CODE (type) == RECORD_TYPE))
1.21886 ++ && !targetm.narrow_volatile_bitfield ())
1.21887 ++ || mode == VOIDmode
1.21888 + || (mode != BLKmode && ! direct_store[(int) mode]
1.21889 + && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
1.21890 + && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
1.21891 +@@ -7560,7 +7575,21 @@ expand_expr_real_1 (tree exp, rtx target
1.21892 + by doing the extract into an object as wide as the field
1.21893 + (which we know to be the width of a basic mode), then
1.21894 + storing into memory, and changing the mode to BLKmode. */
1.21895 +- if (mode1 == VOIDmode
1.21896 ++ if (
1.21897 ++ /* NB! Added for AVR32, and I think this should be true for
1.21898 ++ all targets not using narrow volatile bitfields. If the
1.21899 ++ bitfield is volatile then we need to perform an access
1.21900 ++ consistent with the container type. */
1.21901 ++ (MEM_P (op0)
1.21902 ++ && MEM_VOLATILE_P (op0)
1.21903 ++ && ((GET_MODE (op0) != BLKmode
1.21904 ++ && GET_MODE_BITSIZE (GET_MODE (op0)) > bitsize )
1.21905 ++ /* If BLKmode, check if this is a record. Do not know
1.21906 ++ if this is really necessary though...*/
1.21907 ++ || (GET_MODE (op0) == BLKmode
1.21908 ++ && TREE_CODE (type) == RECORD_TYPE))
1.21909 ++ && !targetm.narrow_volatile_bitfield ())
1.21910 ++ || mode1 == VOIDmode
1.21911 + || REG_P (op0) || GET_CODE (op0) == SUBREG
1.21912 + || (mode1 != BLKmode && ! direct_load[(int) mode1]
1.21913 + && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
1.21914 +--- a/gcc/flow.c
1.21915 ++++ b/gcc/flow.c
1.21916 +@@ -3327,12 +3327,12 @@ not_reg_cond (rtx x)
1.21917 + if (GET_CODE (x) == NOT)
1.21918 + return XEXP (x, 0);
1.21919 + if (COMPARISON_P (x)
1.21920 +- && REG_P (XEXP (x, 0)))
1.21921 ++ /* && REG_P (XEXP (x, 0))*/)
1.21922 + {
1.21923 +- gcc_assert (XEXP (x, 1) == const0_rtx);
1.21924 ++ /*gcc_assert (XEXP (x, 1) == const0_rtx);*/
1.21925 +
1.21926 + return gen_rtx_fmt_ee (reversed_comparison_code (x, NULL),
1.21927 +- VOIDmode, XEXP (x, 0), const0_rtx);
1.21928 ++ VOIDmode, XEXP (x, 0), XEXP (x, 0) /*const0_rtx*/);
1.21929 + }
1.21930 + return gen_rtx_NOT (0, x);
1.21931 + }
1.21932 +--- a/gcc/function.c
1.21933 ++++ b/gcc/function.c
1.21934 +@@ -2676,8 +2676,12 @@ assign_parm_setup_reg (struct assign_par
1.21935 + SET_DECL_RTL (parm, parmreg);
1.21936 +
1.21937 + /* Copy the value into the register. */
1.21938 +- if (data->nominal_mode != data->passed_mode
1.21939 +- || promoted_nominal_mode != data->promoted_mode)
1.21940 ++ if ( (data->nominal_mode != data->passed_mode
1.21941 ++ /* Added for AVR32: If passed_mode is equal
1.21942 ++ to promoted nominal mode why should be convert?
1.21943 ++ The conversion should make no difference. */
1.21944 ++ && data->passed_mode != promoted_nominal_mode)
1.21945 ++ || promoted_nominal_mode != data->promoted_mode)
1.21946 + {
1.21947 + int save_tree_used;
1.21948 +
1.21949 +--- a/gcc/genemit.c
1.21950 ++++ b/gcc/genemit.c
1.21951 +@@ -121,6 +121,24 @@ max_operand_vec (rtx insn, int arg)
1.21952 + }
1.21953 +
1.21954 + static void
1.21955 ++gen_vararg_prologue(int operands)
1.21956 ++{
1.21957 ++ int i;
1.21958 ++
1.21959 ++ if (operands > 1)
1.21960 ++ {
1.21961 ++ for (i = 1; i < operands; i++)
1.21962 ++ printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
1.21963 ++
1.21964 ++ printf(" va_list args;\n\n");
1.21965 ++ printf(" va_start(args, operand0);\n");
1.21966 ++ for (i = 1; i < operands; i++)
1.21967 ++ printf(" operand%d = va_arg(args, rtx);\n", i);
1.21968 ++ printf(" va_end(args);\n\n");
1.21969 ++ }
1.21970 ++}
1.21971 ++
1.21972 ++static void
1.21973 + print_code (RTX_CODE code)
1.21974 + {
1.21975 + const char *p1;
1.21976 +@@ -405,18 +423,16 @@ gen_insn (rtx insn, int lineno)
1.21977 + fatal ("match_dup operand number has no match_operand");
1.21978 +
1.21979 + /* Output the function name and argument declarations. */
1.21980 +- printf ("rtx\ngen_%s (", XSTR (insn, 0));
1.21981 ++ printf ("rtx\ngen_%s ", XSTR (insn, 0));
1.21982 ++
1.21983 + if (operands)
1.21984 +- for (i = 0; i < operands; i++)
1.21985 +- if (i)
1.21986 +- printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
1.21987 +- else
1.21988 +- printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
1.21989 ++ printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
1.21990 + else
1.21991 +- printf ("void");
1.21992 +- printf (")\n");
1.21993 ++ printf("(void)\n");
1.21994 + printf ("{\n");
1.21995 +
1.21996 ++ gen_vararg_prologue(operands);
1.21997 ++
1.21998 + /* Output code to construct and return the rtl for the instruction body. */
1.21999 +
1.22000 + if (XVECLEN (insn, 1) == 1)
1.22001 +@@ -456,16 +472,12 @@ gen_expand (rtx expand)
1.22002 + operands = max_operand_vec (expand, 1);
1.22003 +
1.22004 + /* Output the function name and argument declarations. */
1.22005 +- printf ("rtx\ngen_%s (", XSTR (expand, 0));
1.22006 ++ printf ("rtx\ngen_%s ", XSTR (expand, 0));
1.22007 + if (operands)
1.22008 +- for (i = 0; i < operands; i++)
1.22009 +- if (i)
1.22010 +- printf (",\n\trtx operand%d", i);
1.22011 +- else
1.22012 +- printf ("rtx operand%d", i);
1.22013 ++ printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
1.22014 + else
1.22015 +- printf ("void");
1.22016 +- printf (")\n");
1.22017 ++ printf("(void)\n");
1.22018 ++
1.22019 + printf ("{\n");
1.22020 +
1.22021 + /* If we don't have any C code to write, only one insn is being written,
1.22022 +@@ -475,6 +487,8 @@ gen_expand (rtx expand)
1.22023 + && operands > max_dup_opno
1.22024 + && XVECLEN (expand, 1) == 1)
1.22025 + {
1.22026 ++ gen_vararg_prologue(operands);
1.22027 ++
1.22028 + printf (" return ");
1.22029 + gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
1.22030 + printf (";\n}\n\n");
1.22031 +@@ -488,6 +502,7 @@ gen_expand (rtx expand)
1.22032 + for (; i <= max_scratch_opno; i++)
1.22033 + printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
1.22034 + printf (" rtx _val = 0;\n");
1.22035 ++ gen_vararg_prologue(operands);
1.22036 + printf (" start_sequence ();\n");
1.22037 +
1.22038 + /* The fourth operand of DEFINE_EXPAND is some code to be executed
1.22039 +--- a/gcc/genflags.c
1.22040 ++++ b/gcc/genflags.c
1.22041 +@@ -127,7 +127,6 @@ static void
1.22042 + gen_proto (rtx insn)
1.22043 + {
1.22044 + int num = num_operands (insn);
1.22045 +- int i;
1.22046 + const char *name = XSTR (insn, 0);
1.22047 + int truth = maybe_eval_c_test (XSTR (insn, 2));
1.22048 +
1.22049 +@@ -158,12 +157,7 @@ gen_proto (rtx insn)
1.22050 + if (num == 0)
1.22051 + fputs ("void", stdout);
1.22052 + else
1.22053 +- {
1.22054 +- for (i = 1; i < num; i++)
1.22055 +- fputs ("rtx, ", stdout);
1.22056 +-
1.22057 +- fputs ("rtx", stdout);
1.22058 +- }
1.22059 ++ fputs("rtx, ...", stdout);
1.22060 +
1.22061 + puts (");");
1.22062 +
1.22063 +@@ -173,12 +167,7 @@ gen_proto (rtx insn)
1.22064 + {
1.22065 + printf ("static inline rtx\ngen_%s", name);
1.22066 + if (num > 0)
1.22067 +- {
1.22068 +- putchar ('(');
1.22069 +- for (i = 0; i < num-1; i++)
1.22070 +- printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
1.22071 +- printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
1.22072 +- }
1.22073 ++ puts("(rtx ARG_UNUSED(a), ...)");
1.22074 + else
1.22075 + puts ("(void)");
1.22076 + puts ("{\n return 0;\n}");
1.22077 +--- a/gcc/genoutput.c
1.22078 ++++ b/gcc/genoutput.c
1.22079 +@@ -386,7 +386,7 @@ output_insn_data (void)
1.22080 + }
1.22081 +
1.22082 + if (d->name && d->name[0] != '*')
1.22083 +- printf (" (insn_gen_fn) gen_%s,\n", d->name);
1.22084 ++ printf (" gen_%s,\n", d->name);
1.22085 + else
1.22086 + printf (" 0,\n");
1.22087 +
1.22088 +--- a/gcc/ifcvt.c
1.22089 ++++ b/gcc/ifcvt.c
1.22090 +@@ -77,7 +77,7 @@ static int num_possible_if_blocks;
1.22091 + static int num_updated_if_blocks;
1.22092 +
1.22093 + /* # of changes made which require life information to be updated. */
1.22094 +-static int num_true_changes;
1.22095 ++int num_true_changes;
1.22096 +
1.22097 + /* Whether conditional execution changes were made. */
1.22098 + static int cond_exec_changed_p;
1.22099 +@@ -287,12 +287,15 @@ cond_exec_process_insns (ce_if_block_t *
1.22100 + if (must_be_last)
1.22101 + return FALSE;
1.22102 +
1.22103 +- if (modified_in_p (test, insn))
1.22104 +- {
1.22105 +- if (!mod_ok)
1.22106 +- return FALSE;
1.22107 +- must_be_last = TRUE;
1.22108 +- }
1.22109 ++#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
1.22110 ++ if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN )
1.22111 ++#endif
1.22112 ++ if (modified_in_p (test, insn))
1.22113 ++ {
1.22114 ++ if (!mod_ok)
1.22115 ++ return FALSE;
1.22116 ++ must_be_last = TRUE;
1.22117 ++ }
1.22118 +
1.22119 + /* Now build the conditional form of the instruction. */
1.22120 + pattern = PATTERN (insn);
1.22121 +@@ -566,16 +569,19 @@ cond_exec_process_if_block (ce_if_block_
1.22122 + /* Do any machine dependent final modifications. */
1.22123 + IFCVT_MODIFY_FINAL (ce_info);
1.22124 + #endif
1.22125 +-
1.22126 +- /* Conversion succeeded. */
1.22127 +- if (dump_file)
1.22128 +- fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
1.22129 +- n_insns, (n_insns == 1) ? " was" : "s were");
1.22130 +-
1.22131 ++
1.22132 + /* Merge the blocks! */
1.22133 +- merge_if_block (ce_info);
1.22134 +- cond_exec_changed_p = TRUE;
1.22135 +- return TRUE;
1.22136 ++ if ( reload_completed ){
1.22137 ++ /* Conversion succeeded. */
1.22138 ++ if (dump_file)
1.22139 ++ fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
1.22140 ++ n_insns, (n_insns == 1) ? " was" : "s were");
1.22141 ++
1.22142 ++ merge_if_block (ce_info);
1.22143 ++ cond_exec_changed_p = TRUE;
1.22144 ++ return TRUE;
1.22145 ++ }
1.22146 ++ return FALSE;
1.22147 +
1.22148 + fail:
1.22149 + #ifdef IFCVT_MODIFY_CANCEL
1.22150 +@@ -1050,7 +1056,11 @@ noce_try_addcc (struct noce_if_info *if_
1.22151 + != UNKNOWN))
1.22152 + {
1.22153 + rtx cond = if_info->cond;
1.22154 +- enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
1.22155 ++ /* This generates wrong code for AVR32. The cond code need not be reversed
1.22156 ++ since the addmodecc patterns add if the condition is NOT met. */
1.22157 ++ /* enum rtx_code code = reversed_comparison_code (cond, if_info->jump);*/
1.22158 ++ enum rtx_code code = GET_CODE(cond);
1.22159 ++
1.22160 +
1.22161 + /* First try to use addcc pattern. */
1.22162 + if (general_operand (XEXP (cond, 0), VOIDmode)
1.22163 +@@ -2651,7 +2661,12 @@ process_if_block (struct ce_if_block * c
1.22164 + && cond_move_process_if_block (ce_info))
1.22165 + return TRUE;
1.22166 +
1.22167 +- if (HAVE_conditional_execution && reload_completed)
1.22168 ++ if (HAVE_conditional_execution &&
1.22169 ++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
1.22170 ++ (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD))
1.22171 ++#else
1.22172 ++ reload_completed)
1.22173 ++#endif
1.22174 + {
1.22175 + /* If we have && and || tests, try to first handle combining the && and
1.22176 + || tests into the conditional code, and if that fails, go back and
1.22177 +@@ -4036,6 +4051,15 @@ rest_of_handle_if_after_reload (void)
1.22178 + cleanup_cfg (CLEANUP_EXPENSIVE
1.22179 + | CLEANUP_UPDATE_LIFE
1.22180 + | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
1.22181 ++
1.22182 ++ /* Hack for the AVR32 experimental ifcvt processing before reload.
1.22183 ++ The AVR32 specific ifcvt code needs to know when ifcvt after reload
1.22184 ++ has begun. */
1.22185 ++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
1.22186 ++ if ( IFCVT_COND_EXEC_BEFORE_RELOAD )
1.22187 ++ cfun->machine->ifcvt_after_reload = 1;
1.22188 ++#endif
1.22189 ++
1.22190 + if (flag_if_conversion2)
1.22191 + if_convert (1);
1.22192 + return 0;
1.22193 +--- a/gcc/longlong.h
1.22194 ++++ b/gcc/longlong.h
1.22195 +@@ -226,6 +226,41 @@ UDItype __umulsidi3 (USItype, USItype);
1.22196 + #define UDIV_TIME 100
1.22197 + #endif /* __arm__ */
1.22198 +
1.22199 ++#if defined (__avr32__) && W_TYPE_SIZE == 32
1.22200 ++#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1.22201 ++ __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \
1.22202 ++ : "=r" ((USItype) (sh)), \
1.22203 ++ "=&r" ((USItype) (sl)) \
1.22204 ++ : "r" ((USItype) (ah)), \
1.22205 ++ "r" ((USItype) (bh)), \
1.22206 ++ "r" ((USItype) (al)), \
1.22207 ++ "r" ((USItype) (bl)) __CLOBBER_CC)
1.22208 ++#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1.22209 ++ __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
1.22210 ++ : "=r" ((USItype) (sh)), \
1.22211 ++ "=&r" ((USItype) (sl)) \
1.22212 ++ : "r" ((USItype) (ah)), \
1.22213 ++ "r" ((USItype) (bh)), \
1.22214 ++ "r" ((USItype) (al)), \
1.22215 ++ "r" ((USItype) (bl)) __CLOBBER_CC)
1.22216 ++
1.22217 ++#if !defined (__AVR32_UC__) || __AVR32_UC__ != 3
1.22218 ++#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
1.22219 ++
1.22220 ++#define umul_ppmm(w1, w0, u, v) \
1.22221 ++{ \
1.22222 ++ DWunion __w; \
1.22223 ++ __w.ll = __umulsidi3 (u, v); \
1.22224 ++ w1 = __w.s.high; \
1.22225 ++ w0 = __w.s.low; \
1.22226 ++}
1.22227 ++#endif
1.22228 ++
1.22229 ++#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
1.22230 ++#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
1.22231 ++#define COUNT_LEADING_ZEROS_0 32
1.22232 ++#endif
1.22233 ++
1.22234 + #if defined (__hppa) && W_TYPE_SIZE == 32
1.22235 + #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1.22236 + __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
1.22237 +--- a/gcc/optabs.h
1.22238 ++++ b/gcc/optabs.h
1.22239 +@@ -431,7 +431,7 @@ extern enum insn_code reload_out_optab[N
1.22240 + extern GTY(()) optab code_to_optab[NUM_RTX_CODE + 1];
1.22241 +
1.22242 +
1.22243 +-typedef rtx (*rtxfun) (rtx);
1.22244 ++typedef rtx (*rtxfun) (rtx, ...);
1.22245 +
1.22246 + /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
1.22247 + gives the gen_function to make a branch to test that condition. */
1.22248 +--- a/gcc/regrename.c
1.22249 ++++ b/gcc/regrename.c
1.22250 +@@ -1592,6 +1592,9 @@ copyprop_hardreg_forward_1 (basic_block
1.22251 + bool changed = false;
1.22252 + rtx insn;
1.22253 +
1.22254 ++ rtx prev_pred_test;
1.22255 ++ int prev_pred_insn_skipped = 0;
1.22256 ++
1.22257 + for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
1.22258 + {
1.22259 + int n_ops, i, alt, predicated;
1.22260 +@@ -1630,7 +1633,60 @@ copyprop_hardreg_forward_1 (basic_block
1.22261 + || (predicated && recog_data.operand_type[i] == OP_OUT))
1.22262 + recog_data.operand_type[i] = OP_INOUT;
1.22263 + }
1.22264 ++
1.22265 +
1.22266 ++ /* Added for targets (AVR32) which supports test operands to be modified
1.22267 ++ in cond_exec instruction. For these targets we cannot make a change to
1.22268 ++ the test operands if one of the test operands is an output operand. This is because
1.22269 ++ changing the test operands might cause the need for inserting a new test
1.22270 ++ insns in the middle of a sequence of cond_exec insns and if the test operands
1.22271 ++ are modified these tests will fail.
1.22272 ++ */
1.22273 ++
1.22274 ++ if ( IFCVT_ALLOW_MODIFY_TEST_IN_INSN
1.22275 ++ && predicated )
1.22276 ++ {
1.22277 ++ int insn_skipped = 0;
1.22278 ++ rtx test = COND_EXEC_TEST (PATTERN (insn));
1.22279 ++
1.22280 ++ /* Check if the previous insn was a skipped predicated insn with the same
1.22281 ++ test as this predicated insn. If so we cannot do any modification to
1.22282 ++ this insn either since we cannot emit the test insn because the operands
1.22283 ++ are clobbered. */
1.22284 ++ if ( prev_pred_insn_skipped
1.22285 ++ && (rtx_equal_p (test, prev_pred_test)
1.22286 ++ || rtx_equal_p (test, reversed_condition (prev_pred_test))) )
1.22287 ++ {
1.22288 ++ insn_skipped = 1;
1.22289 ++ }
1.22290 ++ else
1.22291 ++ {
1.22292 ++ /* Check if the output operand is used in the test expression. */
1.22293 ++ for (i = 0; i < n_ops; ++i)
1.22294 ++ if ( recog_data.operand_type[i] == OP_INOUT
1.22295 ++ && reg_mentioned_p (recog_data.operand[i], test) )
1.22296 ++ {
1.22297 ++ insn_skipped = 1;
1.22298 ++ break;
1.22299 ++ }
1.22300 ++
1.22301 ++ }
1.22302 ++
1.22303 ++ prev_pred_test = test;
1.22304 ++ prev_pred_insn_skipped = insn_skipped;
1.22305 ++ if ( insn_skipped )
1.22306 ++ {
1.22307 ++ if (insn == BB_END (bb))
1.22308 ++ break;
1.22309 ++ else
1.22310 ++ continue;
1.22311 ++ }
1.22312 ++ }
1.22313 ++ else
1.22314 ++ {
1.22315 ++ prev_pred_insn_skipped = 0;
1.22316 ++ }
1.22317 ++
1.22318 + /* For each earlyclobber operand, zap the value data. */
1.22319 + for (i = 0; i < n_ops; i++)
1.22320 + if (recog_op_alt[i][alt].earlyclobber)
1.22321 +--- a/gcc/reload.c
1.22322 ++++ b/gcc/reload.c
1.22323 +@@ -4574,7 +4574,7 @@ find_reloads_toplev (rtx x, int opnum, e
1.22324 + x = mem;
1.22325 + i = find_reloads_address (GET_MODE (x), &x, XEXP (x, 0), &XEXP (x, 0),
1.22326 + opnum, type, ind_levels, insn);
1.22327 +- if (x != mem)
1.22328 ++ if (!rtx_equal_p (x, mem))
1.22329 + push_reg_equiv_alt_mem (regno, x);
1.22330 + if (address_reloaded)
1.22331 + *address_reloaded = i;
1.22332 +--- a/gcc/sched-deps.c
1.22333 ++++ b/gcc/sched-deps.c
1.22334 +@@ -649,7 +649,14 @@ fixup_sched_groups (rtx insn)
1.22335 +
1.22336 + prev_nonnote = prev_nonnote_insn (insn);
1.22337 + if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1.22338 +- && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1.22339 ++ /* Modification for AVR32 by RP: Why is this here, this will
1.22340 ++ cause instruction to be without any dependencies which might
1.22341 ++ cause it to be moved anywhere. For the AVR32 we try to keep
1.22342 ++ a group of conditionals together even if they are mutual exclusive.
1.22343 ++ */
1.22344 ++ && (! sched_insns_conditions_mutex_p (insn, prev_nonnote)
1.22345 ++ || GET_CODE (PATTERN (insn)) == COND_EXEC )
1.22346 ++ )
1.22347 + add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1.22348 + }
1.22349 +
1.22350 +@@ -1123,8 +1130,29 @@ sched_analyze_insn (struct deps *deps, r
1.22351 +
1.22352 + if (code == COND_EXEC)
1.22353 + {
1.22354 ++#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
1.22355 ++ if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN)
1.22356 ++ {
1.22357 ++ /* Check if we have a group of conditional instructions with the same test.
1.22358 ++ If so we must make sure that they are not scheduled apart in order to
1.22359 ++ avoid unnecessary tests and if one of the registers in the test is modified
1.22360 ++ in the instruction this is needed to ensure correct code. */
1.22361 ++ if ( prev_nonnote_insn (insn)
1.22362 ++ && INSN_P (prev_nonnote_insn (insn))
1.22363 ++ && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC
1.22364 ++ && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0))
1.22365 ++ && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1))
1.22366 ++ && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x))
1.22367 ++ || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn)))
1.22368 ++ {
1.22369 ++ SCHED_GROUP_P (insn) = 1;
1.22370 ++ //CANT_MOVE (prev_nonnote_insn (insn)) = 1;
1.22371 ++ }
1.22372 ++ }
1.22373 ++#endif
1.22374 + sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
1.22375 +
1.22376 ++
1.22377 + /* ??? Should be recording conditions so we reduce the number of
1.22378 + false dependencies. */
1.22379 + x = COND_EXEC_CODE (x);
1.22380 +--- a/gcc/testsuite/gcc.dg/sibcall-3.c
1.22381 ++++ b/gcc/testsuite/gcc.dg/sibcall-3.c
1.22382 +@@ -5,7 +5,7 @@
1.22383 + Copyright (C) 2002 Free Software Foundation Inc.
1.22384 + Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
1.22385 +
1.22386 +-/* { dg-do run { xfail arc-*-* avr-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
1.22387 ++/* { dg-do run { xfail arc-*-* avr-*-* avr32-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
1.22388 + /* { dg-options "-O2 -foptimize-sibling-calls" } */
1.22389 +
1.22390 + /* The option -foptimize-sibling-calls is the default, but serves as
1.22391 +--- a/gcc/testsuite/gcc.dg/sibcall-4.c
1.22392 ++++ b/gcc/testsuite/gcc.dg/sibcall-4.c
1.22393 +@@ -5,7 +5,7 @@
1.22394 + Copyright (C) 2002 Free Software Foundation Inc.
1.22395 + Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
1.22396 +
1.22397 +-/* { dg-do run { xfail arc-*-* avr-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
1.22398 ++/* { dg-do run { xfail arc-*-* avr-*-* avr32-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
1.22399 + /* { dg-options "-O2 -foptimize-sibling-calls" } */
1.22400 +
1.22401 + /* The option -foptimize-sibling-calls is the default, but serves as
1.22402 +--- a/gcc/testsuite/gcc.dg/trampoline-1.c
1.22403 ++++ b/gcc/testsuite/gcc.dg/trampoline-1.c
1.22404 +@@ -46,6 +46,8 @@ void foo (void)
1.22405 +
1.22406 + int main (void)
1.22407 + {
1.22408 ++#ifndef NO_TRAMPOLINES
1.22409 + foo ();
1.22410 ++#endif
1.22411 + return 0;
1.22412 + }
1.22413 +--- a/gcc/testsuite/g++.old-deja/g++.pt/static11.C
1.22414 ++++ b/gcc/testsuite/g++.old-deja/g++.pt/static11.C
1.22415 +@@ -2,7 +2,7 @@
1.22416 + // in their dejagnu baseboard description) require that the status is
1.22417 + // final when exit is entered (or main returns), and not "overruled" by a
1.22418 + // destructor calling _exit. It's not really worth it to handle that.
1.22419 +-// { dg-do run { xfail mmix-knuth-mmixware xtensa-*-elf* arm*-*-elf arm*-*-eabi m68k-*-elf } }
1.22420 ++// { dg-do run { xfail mmix-knuth-mmixware xtensa-*-elf* avr32-*-elf arm*-*-elf arm*-*-eabi m68k-*-elf } }
1.22421 +
1.22422 + // Bug: g++ was failing to destroy C<int>::a because it was using two
1.22423 + // different sentry variables for construction and destruction.
1.22424 +--- a/gcc/version.c
1.22425 ++++ b/gcc/version.c
1.22426 +@@ -8,7 +8,7 @@
1.22427 + in parentheses. You may also wish to include a number indicating
1.22428 + the revision of your modified compiler. */
1.22429 +
1.22430 +-#define VERSUFFIX ""
1.22431 ++#define VERSUFFIX "-atmel.1.1.3.avr32linux.1"
1.22432 +
1.22433 + /* This is the location of the online document giving instructions for
1.22434 + reporting bugs. If you distribute a modified version of GCC,
1.22435 +@@ -17,9 +17,9 @@
1.22436 + forward us bugs reported to you, if you determine that they are
1.22437 + not bugs in your modifications.) */
1.22438 +
1.22439 +-const char bug_report_url[] = "<URL:http://gcc.gnu.org/bugs.html>";
1.22440 ++const char bug_report_url[] = "<URL:http://www.atmel.com/avr32/>";
1.22441 +
1.22442 + /* The complete version string, assembled from several pieces.
1.22443 + BASEVER, DATESTAMP, and DEVPHASE are defined by the Makefile. */
1.22444 +
1.22445 +-const char version_string[] = BASEVER DATESTAMP DEVPHASE VERSUFFIX;
1.22446 ++const char version_string[] = BASEVER VERSUFFIX DATESTAMP DEVPHASE;
1.22447 +--- a/libstdc++-v3/acinclude.m4
1.22448 ++++ b/libstdc++-v3/acinclude.m4
1.22449 +@@ -125,15 +125,6 @@ AC_DEFUN([GLIBCXX_CONFIGURE], [
1.22450 + ## other macros from doing the same. This should be automated.) -pme
1.22451 + need_libmath=no
1.22452 +
1.22453 +- # Check for uClibc since Linux platforms use different configuration
1.22454 +- # directories depending on the C library in use.
1.22455 +- AC_EGREP_CPP([_using_uclibc], [
1.22456 +- #include <stdio.h>
1.22457 +- #if __UCLIBC__
1.22458 +- _using_uclibc
1.22459 +- #endif
1.22460 +- ], uclibc=yes, uclibc=no)
1.22461 +-
1.22462 + # Find platform-specific directories containing configuration info.
1.22463 + # Also possibly modify flags used elsewhere, as needed by the platform.
1.22464 + GLIBCXX_CHECK_HOST
1.22465 +@@ -1389,8 +1380,8 @@ AC_DEFUN([GLIBCXX_ENABLE_CLOCALE], [
1.22466 + #endif
1.22467 + int main()
1.22468 + {
1.22469 +- const char __one[] = "Äuglein Augmen";
1.22470 +- const char __two[] = "Äuglein";
1.22471 ++ const char __one[] = "Äuglein Augmen";
1.22472 ++ const char __two[] = "Äuglein";
1.22473 + int i;
1.22474 + int j;
1.22475 + __locale_t loc;
1.22476 +--- a/libstdc++-v3/config/os/gnu-linux/ctype_base.h
1.22477 ++++ b/libstdc++-v3/config/os/gnu-linux/ctype_base.h
1.22478 +@@ -31,6 +31,8 @@
1.22479 + //
1.22480 + // ISO C++ 14882: 22.1 Locales
1.22481 + //
1.22482 ++#include <features.h>
1.22483 ++#include <ctype.h>
1.22484 +
1.22485 + /** @file ctype_base.h
1.22486 + * This is an internal header file, included by other library headers.
1.22487 +@@ -45,8 +47,12 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
1.22488 + struct ctype_base
1.22489 + {
1.22490 + // Non-standard typedefs.
1.22491 +- typedef const int* __to_type;
1.22492 +-
1.22493 ++#ifdef __UCLIBC__
1.22494 ++ typedef const __ctype_touplow_t* __to_type;
1.22495 ++#else
1.22496 ++ typedef const int* __to_type;
1.22497 ++#endif
1.22498 ++
1.22499 + // NB: Offsets into ctype<char>::_M_table force a particular size
1.22500 + // on the mask type. Because of this, we don't use an enum.
1.22501 + typedef unsigned short mask;
1.22502 +--- a/libstdc++-v3/include/Makefile.in
1.22503 ++++ b/libstdc++-v3/include/Makefile.in
1.22504 +@@ -36,6 +36,7 @@ POST_UNINSTALL = :
1.22505 + build_triplet = @build@
1.22506 + host_triplet = @host@
1.22507 + target_triplet = @target@
1.22508 ++LIBOBJDIR =
1.22509 + DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
1.22510 + $(top_srcdir)/fragment.am
1.22511 + subdir = include
1.22512 +--- a/libstdc++-v3/libmath/Makefile.in
1.22513 ++++ b/libstdc++-v3/libmath/Makefile.in
1.22514 +@@ -37,6 +37,7 @@ POST_UNINSTALL = :
1.22515 + build_triplet = @build@
1.22516 + host_triplet = @host@
1.22517 + target_triplet = @target@
1.22518 ++LIBOBJDIR =
1.22519 + subdir = libmath
1.22520 + DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
1.22521 + ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
1.22522 +--- a/libstdc++-v3/libsupc++/Makefile.in
1.22523 ++++ b/libstdc++-v3/libsupc++/Makefile.in
1.22524 +@@ -38,6 +38,7 @@ POST_UNINSTALL = :
1.22525 + build_triplet = @build@
1.22526 + host_triplet = @host@
1.22527 + target_triplet = @target@
1.22528 ++LIBOBJDIR =
1.22529 + DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
1.22530 + $(srcdir)/Makefile.in $(top_srcdir)/fragment.am
1.22531 + subdir = libsupc++
1.22532 +--- a/libstdc++-v3/Makefile.in
1.22533 ++++ b/libstdc++-v3/Makefile.in
1.22534 +@@ -36,6 +36,7 @@ POST_UNINSTALL = :
1.22535 + build_triplet = @build@
1.22536 + host_triplet = @host@
1.22537 + target_triplet = @target@
1.22538 ++LIBOBJDIR =
1.22539 + DIST_COMMON = README $(am__configure_deps) $(srcdir)/../config.guess \
1.22540 + $(srcdir)/../config.sub $(srcdir)/../install-sh \
1.22541 + $(srcdir)/../ltmain.sh $(srcdir)/../missing \
1.22542 +--- a/libstdc++-v3/po/Makefile.in
1.22543 ++++ b/libstdc++-v3/po/Makefile.in
1.22544 +@@ -36,6 +36,7 @@ POST_UNINSTALL = :
1.22545 + build_triplet = @build@
1.22546 + host_triplet = @host@
1.22547 + target_triplet = @target@
1.22548 ++LIBOBJDIR =
1.22549 + DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
1.22550 + $(top_srcdir)/fragment.am
1.22551 + subdir = po
1.22552 +--- a/libstdc++-v3/src/Makefile.in
1.22553 ++++ b/libstdc++-v3/src/Makefile.in
1.22554 +@@ -36,6 +36,7 @@ POST_UNINSTALL = :
1.22555 + build_triplet = @build@
1.22556 + host_triplet = @host@
1.22557 + target_triplet = @target@
1.22558 ++LIBOBJDIR =
1.22559 + DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
1.22560 + $(top_srcdir)/fragment.am
1.22561 + subdir = src
1.22562 +--- a/Makefile.def
1.22563 ++++ b/Makefile.def
1.22564 +@@ -481,7 +481,7 @@ lang_env_dependencies = { module=rda; };
1.22565 + lang_env_dependencies = { module=winsup; };
1.22566 + lang_env_dependencies = { module=qthreads; };
1.22567 +
1.22568 +-dependencies = { module=all-target-libgloss; on=configure-target-newlib; };
1.22569 ++dependencies = { module=all-target-libgloss; on=all-target-newlib; };
1.22570 + dependencies = { module=all-target-winsup; on=all-target-libiberty; };
1.22571 + dependencies = { module=all-target-winsup; on=all-target-libtermcap; };
1.22572 +
1.22573 +--- a/Makefile.in
1.22574 ++++ b/Makefile.in
1.22575 +@@ -43791,7 +43791,7 @@ all-target-libobjc: maybe-all-target-boe
1.22576 + all-target-libstdc++-v3: maybe-all-target-libiberty
1.22577 + install-target-libssp: maybe-install-gcc
1.22578 + install-target-libgomp: maybe-install-gcc
1.22579 +-all-target-libgloss: maybe-configure-target-newlib
1.22580 ++all-target-libgloss: maybe-all-target-newlib
1.22581 + all-target-winsup: maybe-all-target-libiberty
1.22582 + all-target-winsup: maybe-all-target-libtermcap
1.22583 +
1.22584 +--- a/gcc/configure.ac
1.22585 ++++ b/gcc/configure.ac
1.22586 +@@ -2158,7 +2158,7 @@ L2:],
1.22587 + as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q`
1.22588 + if echo "$as_ver" | grep GNU > /dev/null; then
1.22589 + changequote(,)dnl
1.22590 +- as_ver=`echo $as_ver | sed -e 's/GNU assembler \([0-9.][0-9.]*\).*/\1/'`
1.22591 ++ as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'`
1.22592 + as_major=`echo $as_ver | sed 's/\..*//'`
1.22593 + as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'`
1.22594 + changequote([,])dnl
1.22595 +@@ -2971,7 +2971,7 @@ esac
1.22596 + case "$target" in
1.22597 + i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
1.22598 + | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \
1.22599 +- | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-*)
1.22600 ++ | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | avr32-*-*)
1.22601 + insn="nop"
1.22602 + ;;
1.22603 + ia64*-*-* | s390*-*-*)
1.22604 +--- a/gcc/configure
1.22605 ++++ b/gcc/configure
1.22606 +@@ -14023,7 +14023,7 @@ L2:' > conftest.s
1.22607 + # arbitrary sections are supported and try the test.
1.22608 + as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q`
1.22609 + if echo "$as_ver" | grep GNU > /dev/null; then
1.22610 +- as_ver=`echo $as_ver | sed -e 's/GNU assembler \([0-9.][0-9.]*\).*/\1/'`
1.22611 ++ as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'`
1.22612 + as_major=`echo $as_ver | sed 's/\..*//'`
1.22613 + as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'`
1.22614 + if test $as_major -eq 2 && test $as_minor -lt 11
1.22615 +@@ -15610,7 +15610,7 @@ esac
1.22616 + case "$target" in
1.22617 + i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
1.22618 + | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \
1.22619 +- | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-*)
1.22620 ++ | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | avr32-*-*)
1.22621 + insn="nop"
1.22622 + ;;
1.22623 + ia64*-*-* | s390*-*-*)